def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param logger: logger instance
    :param dbi: database interface
    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.logger = logger
    # Oracle multi-table INSERT ALL: creates the processed dataset and the
    # dataset access type rows on demand, then inserts the dataset itself
    # (only when the data tier already exists).
    self.sql = \
        """insert all
           when not exists (select * from %sprocessed_datasets where processed_ds_name=processed_n) then
           into %sprocessed_datasets(processed_ds_id, processed_ds_name)
           values (%sseq_psds.nextval, processed_n)
           when not exists (select * from %sdataset_access_types where dataset_access_type=access_t) then
           into %sdataset_access_types(dataset_access_type_id, dataset_access_type)
           values (%sseq_dtp.nextval, access_t)
           when exists (select data_tier_id from %sdata_tiers where data_tier_name=tier) then
           into %sdatasets
           ( dataset_id, dataset, primary_ds_id, processed_ds_id, data_tier_id,
             dataset_access_type_id, acquisition_era_id, processing_era_id, physics_group_id,
             xtcrosssection, prep_id, creation_date, create_by, last_modification_date, last_modified_by )
           values
           ( :dataset_id, :dataset, :primary_ds_id,
             nvl((select processed_ds_id from %sprocessed_datasets where processed_ds_name=processed_n), %sseq_psds.nextval),
             (select data_tier_id from %sdata_tiers where data_tier_name=tier),
             nvl((select dataset_access_type_id from %sdataset_access_types where dataset_access_type=access_t), %sseq_dtp.nextval),
             :acquisition_era_id, :processing_era_id, :physics_group_id, :xtcrosssection, :prep_id,
             cdate, cby, :last_modification_date, :last_modified_by )
           select :processed_ds_name processed_n, :data_tier_name tier,
                  :dataset_access_type access_t, :creation_date cdate, :create_by cby
           from dual""" % ((self.owner,)*13)
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.sql = """UPDATE %sBLOCKS SET OPEN_FOR_WRITING = :open_for_writing where BLOCK_NAME = :block_name""" % self.owner
def fixDBSmissingFileAssoc():
    """
    Find dbsbuffer files that have no entry in dbsbuffer_file_location and
    insert the missing association rows, deriving the location from the
    corresponding wmbs file/location tables.
    """
    os.environ['WMAGENT_CONFIG'] = '/data/srv/wmagent/current/config/wmagent/config.py'
    connectToDB()
    myThread = threading.currentThread()
    formatter = DBFormatter(logging, myThread.dbi)
    problemFilesSql = """
        select dbsbuffer_file.id as fileid, dbsbuffer_location.id as seid
          from wmbs_file_location fl
          inner join wmbs_file_details fd on fd.id = fl.fileid
          inner join wmbs_location_senames wls on wls.location = fl.location
          inner join wmbs_location wl on wl.id = fl.location
          inner join dbsbuffer_location on dbsbuffer_location.se_name = wls.se_name
          inner join dbsbuffer_file on dbsbuffer_file.lfn = fd.lfn
         where fd.lfn in (select df.lfn from dbsbuffer_file df
                          left outer join dbsbuffer_file_location dfl on df.id = dfl.filename
                          where dfl.location is null)
        """
    unfinishedTasks = formatter.formatDict(formatter.dbi.processData(problemFilesSql))
    print("%s length" % len(unfinishedTasks))
    # Keep only the first (fileid, seid) pair seen per file id.
    result = {}
    for row in unfinishedTasks:
        result.setdefault(row["fileid"], row)
        print(row)
    print("trimmed %s length" % len(result))
    insertSQL = """INSERT INTO dbsbuffer_file_location (filename, location)
                   VALUES (:fileid, :seid)"""
    done = formatter.dbi.processData(insertSQL, result.values())
    print("inserted %s" % done)
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.sql = """SELECT FILE_CLOB FROM %sFILE_BUFFERS WHERE BLOCK_ID=:block_id AND rownum < 10""" % self.owner
def main():
    """
    _main_

    Attach all files that are available but have no location to the
    T0_CH_CERN location from resource control.
    """
    if 'WMAGENT_CONFIG' not in os.environ:
        os.environ['WMAGENT_CONFIG'] = '/data/srv/wmagent/current/config/wmagent/config.py'
    if 'manage' not in os.environ:
        os.environ['manage'] = '/data/srv/wmagent/current/config/wmagent/manage'
    connectToDB()
    myThread = threading.currentThread()
    formatter = DBFormatter(logging, myThread.dbi)
    # Get all the files available for each subscription
    print("Getting files available without location...")
    availFiles = formatter.formatDict(myThread.dbi.processData(getFilesAvailable))
    print("Total files available: %s" % len(availFiles))
    uniqAvailFiles = list(set(x['fileid'] for x in availFiles))
    availFiles = [{'fileid': x} for x in uniqAvailFiles]
    print("Total unique files available: %s" % len(uniqAvailFiles))
    # Check for an empty result BEFORE indexing [0] to avoid an IndexError
    # when T0_CH_CERN is missing from resource control.
    cernLocations = formatter.formatDict(myThread.dbi.processData(getCERNLocation))
    if not cernLocations:
        print("You need to add T0_CH_CERN to the resource control db")
        sys.exit(1)
    cernID = cernLocations[0]
    print("CERN location id: %s" % cernID)
    for fid in availFiles:
        fid.update(cernID)
    myThread.dbi.processData(updateFileLocation, availFiles)
    print("Done!")
    sys.exit(0)
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    # Select list and FROM clause are kept separate so callers can add
    # their own WHERE conditions between them.
    self.sql1 = \
        """
        SELECT R.RELEASE_VERSION, P.PSET_HASH, P.PSET_NAME, A.APP_NAME,
               O.OUTPUT_MODULE_LABEL, O.GLOBAL_TAG, O.CREATION_DATE, O.CREATE_BY
        """
    self.sql2 = \
        """
        from %sOUTPUT_MODULE_CONFIGS O
        JOIN %sRELEASE_VERSIONS R ON O.RELEASE_VERSION_ID=R.RELEASE_VERSION_ID
        JOIN %sAPPLICATION_EXECUTABLES A ON O.APP_EXEC_ID=A.APP_EXEC_ID
        JOIN %sPARAMETER_SET_HASHES P ON O.PARAMETER_SET_HASH_ID=P.PARAMETER_SET_HASH_ID
        """ % ((self.owner,)*4)
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.sql = """ SELECT B.BLOCK_NAME FROM %sBLOCKS B """ % self.owner
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.sql = """UPDATE %sBLOCKS SET FILE_COUNT=:file_count, BLOCK_SIZE=:block_size where BLOCK_ID=:block_id""" % self.owner
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.basesql = " D.DATASET FROM %sDATASETS D " % self.owner
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.sql = """DELETE FROM %sFILE_BUFFERS WHERE LOGICAL_FILE_NAME=:logical_file_name""" % self.owner
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.sql = """UPDATE %sDATASETS SET IS_DATASET_VALID = :is_dataset_valid where DATASET = :dataset""" % self.owner
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.sql = """SELECT B.BLOCK_ID as BLOCK_ID, B.DATASET_ID as DATASET_ID FROM %sBLOCKS B JOIN %sFILES FL ON FL.BLOCK_ID=B.BLOCK_ID LEFT OUTER JOIN %sFILE_PARENTS FP ON FP.PARENT_FILE_ID = FL.FILE_ID WHERE FP.THIS_FILE_ID IN ( """% ((self.owner,)*3)
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    # SQL is built dynamically by the caller/execute method.
    self.sql = ""
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.logger = logger #logger.warning('I am in dataset dao init') self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.basesql = \ """ D.DATASET_ID, D.DATASET, D.PREP_ID, D.XTCROSSSECTION, D.CREATION_DATE, D.CREATE_BY, D.LAST_MODIFICATION_DATE, D.LAST_MODIFIED_BY, P.PRIMARY_DS_NAME, PDT.PRIMARY_DS_TYPE, PD.PROCESSED_DS_NAME, DT.DATA_TIER_NAME, DP.DATASET_ACCESS_TYPE, AE.ACQUISITION_ERA_NAME, PE.PROCESSING_VERSION, PH.PHYSICS_GROUP_NAME FROM %sDATASETS D JOIN %sPRIMARY_DATASETS P ON P.PRIMARY_DS_ID = D.PRIMARY_DS_ID JOIN %sPRIMARY_DS_TYPES PDT ON PDT.PRIMARY_DS_TYPE_ID = P.PRIMARY_DS_TYPE_ID JOIN %sPROCESSED_DATASETS PD ON PD.PROCESSED_DS_ID = D.PROCESSED_DS_ID JOIN %sDATA_TIERS DT ON DT.DATA_TIER_ID = D.DATA_TIER_ID JOIN %sDATASET_ACCESS_TYPES DP on DP.DATASET_ACCESS_TYPE_ID= D.DATASET_ACCESS_TYPE_ID LEFT OUTER JOIN %sACQUISITION_ERAS AE ON AE.ACQUISITION_ERA_ID = D.ACQUISITION_ERA_ID LEFT OUTER JOIN %sPROCESSING_ERAS PE ON PE.PROCESSING_ERA_ID = D.PROCESSING_ERA_ID LEFT OUTER JOIN %sPHYSICS_GROUPS PH ON PH.PHYSICS_GROUP_ID = D.PHYSICS_GROUP_ID """ % ((self.owner,)*9)
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.sql = """SELECT MIN(MIGRATION_REQUEST_ID) AS MIGRATION_REQUEST_ID FROM %sMIGRATION_REQUESTS WHERE MIGRATION_STATUS='PENDING' """ % (self.owner)
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.logger = logger
    # Oracle multi-table INSERT ALL: creates the file data type row on
    # demand, then always inserts the file row itself.
    self.sql = \
        """insert all
           when not exists (select * from %sfile_data_types where file_type = file_t) then
           into %sfile_data_types(file_type_id, file_type)
           values( %sseq_ft.nextval, file_t )
           when 1 = 1 then
           into %sfiles
           (file_id, logical_file_name, is_file_valid, dataset_id, block_id, file_type_id,
            check_sum, event_count, file_size, adler32, md5, auto_cross_section,
            last_modification_date, last_modified_by)
           values
           (:file_id, :logical_file_name, :is_file_valid, :dataset_id, :block_id,
            nvl(( select file_type_id from %sfile_data_types where file_type = file_t ), %sseq_ft.nextval ),
            :check_sum, :event_count, :file_size, :adler32, :md5, :auto_cross_section,
            :last_modification_date, :last_modified_by)
           select :file_type file_t from dual """ % ((self.owner,)*6)
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.sql = """UPDATE %sFILES SET IS_FILE_VALID = :is_file_valid where LOGICAL_FILE_NAME = :logical_file_name""" % self.owner
def __init__(self, config=None):
    """
    __DatabasePage__

    A page with a database connection (a WMCore.Database.DBFormatter) held
    in self.dbi. Look at the DBFormatter class for other handy helper
    methods, such as getBinds and formatDict.

    The DBFormatter class was originally intended to be extensively
    sub-classed, such that it's subclasses followed the DAO pattern. For web
    tools we do not generally do this, and you will normally access the
    database interface directly:

    binds = {'id': 123}
    sql = "select * from table where id = :id"
    result = self.dbi.processData(sql, binds)
    return self.formatDict(result)

    Although following the DAO pattern is still possible and encouraged
    where appropriate. However, if you want to use the DAO pattern it may be
    better to *not* expose the DAO classes and have a normal DatabasePage
    exposed that passes the database connection to all the DAO's.
    """
    # Avoid a mutable default argument: a shared {} would leak state
    # between instances.
    if config is None:
        config = {}
    TemplatedPage.__init__(self, config)
    dbConfig = ConfigDBMap(config)
    conn = DBFactory(self, dbConfig.getDBUrl(), dbConfig.getOption()).connect()
    DBFormatter.__init__(self, self, conn)
    myThread = threading.currentThread()
    myThread.transaction = Transaction(conn)
    myThread.transaction.commit()
def testBFormatting(self):
    """
    Test various formats
    """
    thread = threading.currentThread()
    fmt = DBFormatter(thread.logger, thread.dbi)
    thread.transaction.begin()

    # format: list of lists
    rows = fmt.format(thread.transaction.processData(thread.select))
    self.assertEqual(rows, [['value1a', 'value2a'],
                            ['value1b', 'value2b'],
                            ['value1c', 'value2d']])

    # formatOne: just the first row
    firstRow = fmt.formatOne(thread.transaction.processData(thread.select))
    print('test1 ' + str(firstRow))
    self.assertEqual(firstRow, ['value1a', 'value2a'])

    # formatDict: list of dicts keyed by column name
    dictRows = fmt.formatDict(thread.transaction.processData(thread.select))
    self.assertEqual(dictRows, [{'bind2': 'value2a', 'bind1': 'value1a'},
                                {'bind2': 'value2b', 'bind1': 'value1b'},
                                {'bind2': 'value2d', 'bind1': 'value1c'}])

    # formatOneDict: first row as a dict
    firstDict = fmt.formatOneDict(thread.transaction.processData(thread.select))
    self.assertEqual(firstDict, {'bind2': 'value2a', 'bind1': 'value1a'})
def testBFormatting(self):
    """
    Test various formats
    """
    thread = threading.currentThread()
    fmt = DBFormatter(thread.logger, thread.dbi)
    thread.transaction.begin()

    # format: list of lists
    self.assertEqual(
        fmt.format(thread.transaction.processData(thread.select)),
        [["value1a", "value2a"], ["value1b", "value2b"], ["value1c", "value2d"]])

    # formatOne: first row only
    firstRow = fmt.formatOne(thread.transaction.processData(thread.select))
    print("test1 " + str(firstRow))
    self.assertEqual(firstRow, ["value1a", "value2a"])

    # formatDict: list of dicts keyed by column name
    self.assertEqual(
        fmt.formatDict(thread.transaction.processData(thread.select)),
        [{"bind2": "value2a", "bind1": "value1a"},
         {"bind2": "value2b", "bind1": "value1b"},
         {"bind2": "value2d", "bind1": "value1c"}])

    # formatOneDict: first row as a dict
    self.assertEqual(
        fmt.formatOneDict(thread.transaction.processData(thread.select)),
        {"bind2": "value2a", "bind1": "value1a"})
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.sql = """SELECT D.DATASET_ID FROM %sDATASETS D WHERE D.DATASET = :dataset""" % (self.owner)
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.sql = """UPDATE %sACQUISITION_ERAS SET END_DATE=:end_date where acquisition_era_name=:acquisition_era_name""" % self.owner
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.sql = """SELECT FLBUF.LOGICAL_FILE_NAME FROM %sFILE_BUFFERS FLBUF JOIN %sFILES FL ON FL.LOGICAL_FILE_NAME=FLBUF.LOGICAL_FILE_NAME""" % (2*(self.owner,))
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.logger = logger self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" #all listFile APIs should return the same data structure defined by self.sql self.sql_sel = \ """ SELECT F.FILE_ID, F.LOGICAL_FILE_NAME, F.IS_FILE_VALID, F.DATASET_ID, D.DATASET, F.BLOCK_ID, B.BLOCK_NAME, F.FILE_TYPE_ID, FT.FILE_TYPE, F.CHECK_SUM, F.EVENT_COUNT, F.FILE_SIZE, F.BRANCH_HASH_ID, F.ADLER32, F.MD5, F.AUTO_CROSS_SECTION, F.CREATION_DATE, F.CREATE_BY, F.LAST_MODIFICATION_DATE, F.LAST_MODIFIED_BY """ self.sql_cond = \ """ FROM %sFILES F JOIN %sFILE_DATA_TYPES FT ON FT.FILE_TYPE_ID = F.FILE_TYPE_ID JOIN %sDATASETS D ON D.DATASET_ID = F.DATASET_ID JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID JOIN %sDATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID """ % ((self.owner,)*5)
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.sql = """SELECT DISTINCT BLOCK_ID from %sFILE_BUFFERS""" % self.owner
def check_sites(oldsites, badsites, conn):
    """
    Make sure that all sites from the old schema, that aren't in bad sites
    are in the new schema.

    :param oldsites: list of site dicts from the old schema
    :param badsites: sites to skip
    :param conn: database connection/interface with processData()
    """
    logger = logging.getLogger('SiteDB Schema Upgrade: check_sites')
    sql = "select * from siteinfo where cms_name = :cms_name"
    formatter = DBFormatter(logger, conn)
    print("checking %s sites" % len(oldsites))
    for site in oldsites:
        test = None
        if site in badsites:
            continue
        # except Exception as e (not the Python-2-only "Exception, e" form)
        try:
            data = conn.processData(sql, {'cms_name': site['cms_name']})
            test = formatter.formatDict(data)
            if len(test) > 0:
                testsite = test[0]
                if testsite == site:
                    logger.debug("%s migrated correctly!" % site['cms_name'])
                else:
                    logger.warning("Problem with %s" % site['cms_name'])
                    logger.warning("%s != %s" % (testsite, site))
            else:
                logger.warning("Problem with %s" % site['cms_name'])
                logger.warning("%s != %s" % (test, site))
        except Exception as e:
            logger.warning("Problem with %s" % site['cms_name'])
            logger.warning("%s != %s" % (test, site))
            logger.warning(e)
def main():
    """
    Find workflows with jobs executing for more than 6 days, compare them
    against the condor schedd, and interactively mark orphaned wmbs jobs
    as failed/cleanout.
    """
    if 'WMAGENT_CONFIG' not in os.environ:
        os.environ['WMAGENT_CONFIG'] = '/data/srv/wmagent/current/config/wmagent/config.py'
    if 'manage' not in os.environ:
        os.environ['manage'] = '/data/srv/wmagent/current/config/wmagent/manage'
    timenow = int(time.time())
    time6d = timenow - 6 * 24 * 3600
    connectToDB()
    myThread = threading.currentThread()
    formatter = DBFormatter(logging, myThread.dbi)

    # Get list of workflows and number of jobs executing for more than 6 days
    binds = [{'timestamp': time6d}]
    wmbsJobsPerWf = formatter.formatDict(myThread.dbi.processData(getJobsExecuting, binds))
    totalJobs = sum(int(item['count']) for item in wmbsJobsPerWf)
    print("Found %d workflows with a total of %d jobs" % (len(wmbsJobsPerWf), totalJobs))

    # Retrieve all jobs from the condor schedd. xquery returns an iterator,
    # so materialize it once and iterate it per workflow below.
    schedd = condor.Schedd()
    jobs = list(schedd.xquery('true', ['ClusterID', 'ProcId', 'WMAgent_RequestName',
                                       'JobStatus', 'WMAgent_JobID']))

    # Retrieve their status from reqmgr2 and attach matching condor job ids.
    for item in wmbsJobsPerWf:
        item['status'] = getStatus(item['name'])
        item['condorjobs'] = [job['WMAgent_JobID'] for job in jobs
                              if job['WMAgent_RequestName'] == item['name']]

    # Time to take action.
    for item in wmbsJobsPerWf:
        binds = [{'timestamp': time6d, 'wfname': item['name']}]
        jobIds = formatter.formatDict(myThread.dbi.processData(getWMBSIds, binds))
        wmbsIds = [x['id'] for x in jobIds]
        print("%-100s in %s. \nHas %d wmbs and %d condor jobs"
              % (item['name'], item['status'], len(wmbsIds), len(item['condorjobs'])))
        # Just skip it if there are condor jobs out there
        if len(item['condorjobs']) > 0 or item['status'] == 'UNKNOWN':
            continue
        newstatus = 'jobfailed' if item['status'] in ('acquired', 'running-open', 'running-closed') else 'cleanout'
        # NOTE(review): raw_input is Python-2-only; use input() if this
        # script is ever run under Python 3.
        var = raw_input("Marking jobs from %s to %s: (Y/N) " % (item['status'], newstatus))
        if var in ['Y', 'y']:
            print("UPDATED %s" % item['name'])
            binds = []
            for x in jobIds:
                x['state'] = newstatus
                binds.append(x)
            myThread.dbi.processData(updateState, binds)
    print("Done!")
    sys.exit(0)
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" #all listFile APIs should return the same data structure defined by self.sql self.sql = """select FILE_CLOB from %sFILE_BUFFERS WHERE BLOCK_ID=:block_id LIMIT 10""" % self.owner
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.sql = """UPDATE %sDATASETS SET DATASET_ACCESS_TYPE_ID =
                  ( select DATASET_ACCESS_TYPE_ID from %sDATASET_ACCESS_TYPES where DATASET_ACCESS_TYPE=:dataset_access_type)
                  where DATASET = :dataset""" % ((self.owner,)*2)
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    # The parent file id is resolved from its LFN inside the insert.
    self.sql = \
        """insert into %sfile_parents (this_file_id, parent_file_id)
           values(:this_file_id,
                  (select file_id from %sfiles where logical_file_name=:parent_logical_file_name))
        """ % ((self.owner,)*2)
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.logger = logger
    self.sql = """INSERT INTO %sFILES
                  (FILE_ID, LOGICAL_FILE_NAME, IS_FILE_VALID, DATASET_ID, BLOCK_ID,
                   FILE_TYPE_ID, CHECK_SUM, EVENT_COUNT, FILE_SIZE, ADLER32, MD5,
                   AUTO_CROSS_SECTION, LAST_MODIFICATION_DATE, LAST_MODIFIED_BY)
                  VALUES
                  (:file_id, :logical_file_name, :is_file_valid, :dataset_id, :block_id,
                   :file_type_id, :check_sum, :event_count, :file_size, :adler32, :md5,
                   :auto_cross_section, :last_modification_date, :last_modified_by)
                  """ % self.owner
def formatDict(self, results):
    """
    _formatDict_

    Cast the file column to an integer as the DBFormatter's formatDict()
    method turns everything into strings. Also, fixup the results of the
    Oracle query by renaming 'fileid' to file, and collapse the rows into
    one entry per file carrying the list of its 'pnn' locations.
    """
    formattedResults = DBFormatter.formatDict(self, results)
    for formattedResult in formattedResults:
        # Oracle returns the column as 'fileid'; MySQL as 'file'.
        if "file" in formattedResult:
            formattedResult["file"] = int(formattedResult["file"])
        else:
            formattedResult["file"] = int(formattedResult["fileid"])

    # Group the distinct locations per file id.
    tempResults = {}
    for formattedResult in formattedResults:
        fileID = formattedResult["file"]
        locations = tempResults.setdefault(fileID, [])
        if "pnn" in formattedResult and formattedResult["pnn"] not in locations:
            locations.append(formattedResult["pnn"])

    finalResults = []
    for fileID, locations in tempResults.items():
        tmpDict = {"file": fileID}
        # Only attach 'locations' when the file actually has some.
        if locations:
            tmpDict["locations"] = locations
        finalResults.append(tmpDict)
    return finalResults
def format(self, result):
    """
    Return the first column of the first row as an int, or False when the
    query returned no rows.
    """
    rows = DBFormatter.format(self, result)
    if not rows:
        return False
    return int(rows[0][0])
def formatDict(self, result):
    """
    _formatDict_

    Format the query results into something that resembles the XML format
    PhEDEx expects for injection:

    {"location1":
      {"dataset1":
        {"block1": {"is-open": "n", "files": []}}}}
    """
    formatted = {}
    for row in DBFormatter.formatDict(self, result):
        locationDict = formatted.setdefault(row["location"], {})
        datasetDict = locationDict.setdefault(row["dataset"], {})
        if row["blockname"] not in datasetDict:
            # NOTE(review): the "files" list is never populated here —
            # presumably filled by the caller; confirm before changing.
            datasetDict[row["blockname"]] = {"is-open": "n", "files": []}
    return formatted
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.logger = logger self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.sql = \ """ SELECT PD.DATASET parent_dataset, PD.DATASET_ID parent_dataset_id, D.DATASET this_dataset FROM %sDATASETS PD JOIN %sDATASET_PARENTS DP ON DP.PARENT_DATASET_ID = PD.DATASET_ID JOIN %sDATASETS D ON D.DATASET_ID = DP.THIS_DATASET_ID """ % ((self.owner,)*3)
def formatDict(self, result):
    """
    _formatDict_

    Cast the integer attributes of the file object to integers as
    formatDict() will turn everything into a string.

    Expects at least one row; returns a single dict.
    """
    formattedResult = DBFormatter.formatDict(self, result)[0]
    formattedResult["id"] = int(formattedResult["id"])
    formattedResult["merged"] = bool(int(formattedResult["merged"]))
    if formattedResult["events"] is not None:
        formattedResult["events"] = int(formattedResult["events"])
    if formattedResult["first_event"] is not None:
        formattedResult["first_event"] = int(formattedResult["first_event"])
    if "size" not in formattedResult:
        # The size column is named "filesize" in Oracle as size is a
        # reserved word. We'll handle this here to make things easier in
        # the Oracle version of this object.
        formattedResult["size"] = formattedResult.pop("filesize")
    if formattedResult["size"] is not None:
        formattedResult["size"] = int(formattedResult["size"])
    return formattedResult
def format(self, result):
    """
    Return the first column of the first row, or False when the query
    returned no rows.
    """
    rows = DBFormatter.format(self, result)
    if not rows:
        return False
    return rows[0][0]
def formatResult(self, result):
    """
    I need the result in a reasonable list. Returns None if there is no
    cksum, otherwise a dict {'checksums': {cktype: cksum, ...}}.
    """
    formattedResult = {}
    dictVersion = DBFormatter.formatDict(self, result)
    if isinstance(dictVersion, list):
        if len(dictVersion) == 0:
            # Then it's empty
            return None
        # Otherwise there may be several rows; record each checksum type.
        tmpDict = {}
        for entry in dictVersion:
            tmpDict[entry.get('cktype', 'Default')] = entry.get('cksum', None)
        formattedResult['checksums'] = tmpDict
    else:
        formattedResult['checksums'] = {'Default': dictVersion.get('cksum', None)}
    # BUGFIX: the original compared the whole wrapper dict against
    # {'Default': None}, which can never match; compare the checksums
    # payload instead so an empty result really returns None.
    if formattedResult['checksums'] == {'Default': None}:
        return None
    return formattedResult
def formatBulkDict(self, result):
    """
    _formatBulkDict_

    Formats a whole list of dictionaries, keyed by integer file id.
    """
    output = {}
    for row in DBFormatter.formatDict(self, result):
        record = {"id": int(row["id"]),
                  "lfn": row["lfn"],
                  "events": int(row["events"]),
                  "first_event": int(row["first_event"]),
                  "minrun": row.get("minrun", None)}
        # Oracle names the size column "filesize" (SIZE is reserved).
        if "size" in row:
            record["size"] = int(row["size"])
        else:
            record["size"] = int(row["filesize"])
        output[record["id"]] = record
    return output
def format(self, result):
    """
    Return the first column of every row as a flat list of subscription IDs.
    """
    rows = DBFormatter.format(self, result)
    return [row[0] for row in rows]
def format(self, results):
    """Build a list of tuples, one per result row."""
    return [tuple(row) for row in DBFormatter.format(self, results)]
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    # sql includes the optional event_count column; sql1 omits it.
    self.sql = \
        """
        insert into %sfile_lumis (run_num, lumi_section_num, file_id, event_count)
        values (:run_num, :lumi_section_num, :file_id, :event_count)
        """ % (self.owner)
    self.sql1 = \
        """
        insert into %sfile_lumis (run_num, lumi_section_num, file_id)
        values (:run_num, :lumi_section_num, :file_id)
        """ % (self.owner)
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.sql = """SELECT O.OUTPUT_MOD_CONFIG_ID from %sOUTPUT_MODULE_CONFIGS O
                  INNER JOIN %sRELEASE_VERSIONS R ON O.RELEASE_VERSION_ID=R.RELEASE_VERSION_ID
                  INNER JOIN %sAPPLICATION_EXECUTABLES A ON O.APP_EXEC_ID=A.APP_EXEC_ID
                  INNER JOIN %sPARAMETER_SET_HASHES P ON O.PARAMETER_SET_HASH_ID=P.PARAMETER_SET_HASH_ID
                  WHERE A.APP_NAME = :app_name AND R.RELEASE_VERSION=:release_version
                  AND P.PSET_HASH=:pset_hash AND O.OUTPUT_MODULE_LABEL=:output_module_label
                  AND O.GLOBAL_TAG =:global_tag """ % ((self.owner,)*4)
def format(self, results):
    """
    _format_

    Return the first row as {"fwjr_path": ..., "taskName": ...}.
    """
    firstRow = DBFormatter.format(self, results)[0]
    return {"fwjr_path": firstRow[0], "taskName": firstRow[1]}
def selectOne(self, query):
    """
    execute a query.with only one result expected
    """
    # db connect
    self.session.connect()
    # execute query
    resultSet = self.session.processData(query)
    if resultSet.rowcount <= 0:
        return None
    return DBFormatter(self.logger, self.session).formatOne(resultSet)
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    self.logger = logger
    # Open-ended WHERE: callers append their own conditions.
    self.sql = \
        """
        SELECT O.OUTPUT_MOD_CONFIG_ID from %sOUTPUT_MODULE_CONFIGS O
        JOIN %sRELEASE_VERSIONS R ON O.RELEASE_VERSION_ID=R.RELEASE_VERSION_ID
        JOIN %sAPPLICATION_EXECUTABLES A ON O.APP_EXEC_ID=A.APP_EXEC_ID
        JOIN %sPARAMETER_SET_HASHES P ON O.PARAMETER_SET_HASH_ID=P.PARAMETER_SET_HASH_ID
        WHERE """ % ((self.owner,)*4)
def format(self, result):
    """
    Collapse rows of {'name': ..., 'count': ...} into a single
    {name: count} dict.

    BUGFIX: the original bound the formatted rows to a local named ``list``,
    shadowing the builtin, and then called ``list(map(...))`` — a guaranteed
    TypeError. Renamed the local and imported ``reduce`` (a builtin only on
    Python 2). An initializer of {} also makes empty input return {} instead
    of raising.
    """
    from functools import reduce

    rows = DBFormatter.formatDict(self, result)
    # rows look like [{'count': '2', 'name': 'none'}, {'count': '4', 'name': 'new'}]

    def map_function(item):
        return {item['name']: item['count']}

    def reduce_function(x, y):
        x.update(y)
        return x

    return reduce(reduce_function, map(map_function, rows), {})
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    # Check before delete since rowcount is not supported in wmcore.
    self.select = """
        select count(*) as count from {owner}MIGRATION_REQUESTS
        WHERE MIGRATION_REQUEST_ID=:migration_rqst_id and create_by=:create_by
        and (migration_status=0 or migration_status=3 or migration_status=9)
        """.format(owner=self.owner)
    self.sql = """
        Delete from %sMIGRATION_REQUESTS WHERE MIGRATION_REQUEST_ID=:migration_rqst_id
        """ % self.owner
def select(self, query):
    """
    execute a query.
    """
    # db connect
    self.session.connect()
    # processData returns a WMCore.Database.ResultSet ResultSet
    resultSet = self.session.processData(query)
    if resultSet.rowcount <= 0:
        return None
    return DBFormatter(self.logger, self.session).format(resultSet)
def formatOneDict(self, result):
    """
    _formatOneDict_

    Return the first row as a dict, passed through workUnitFormatter.
    """
    firstRow = DBFormatter.formatDict(self, result)[0]
    return workUnitFormatter(firstRow)
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.parent_sql = """ select run_num as R, Lumi_section_num as L, file_id as pid from {owner}file_lumis fl where fl.file_id in (select file_id from {owner}files f where F.DATASET_ID in (select parent_dataset_id from {owner}dataset_parents dp inner join {owner}datasets d on d.dataset_id=DP.THIS_DATASET_ID """.format(owner=self.owner) self.child_sql = """ select run_num as R, Lumi_section_num as L, file_id as cid from {owner}file_lumis fl where fl.file_id in (select file_id from {owner}files f inner join {owner}blocks b on f.block_id = b.block_id """.format(owner=self.owner)
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.logger = logger self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" self.sql = \ """ SELECT B.BLOCK_ID, B.BLOCK_NAME, B.OPEN_FOR_WRITING, B.BLOCK_SIZE, B.FILE_COUNT, B.DATASET_ID, DS.DATASET, B.ORIGIN_SITE_NAME, B.CREATION_DATE, B.CREATE_BY, B.LAST_MODIFICATION_DATE, B.LAST_MODIFIED_BY """ self.fromsql = \ """ FROM %sBLOCKS B JOIN %sDATASETS DS ON DS.DATASET_ID = B.DATASET_ID """ % ((self.owner,)*2)
def __init__(self, logger, dbi, owner=""): """ Add schema owner and sql. """ DBFormatter.__init__(self, logger, dbi) self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else "" #remove F.CREATION_DATE, F.CREATE_BY from the select , see TK#969. YG 7/13/2012 self.sql = \ """ SELECT F.LOGICAL_FILE_NAME, F.IS_FILE_VALID, FT.FILE_TYPE, F.CHECK_SUM, F.EVENT_COUNT, F.FILE_SIZE, F.BRANCH_HASH_ID, F.ADLER32, F.MD5, F.AUTO_CROSS_SECTION, F.LAST_MODIFICATION_DATE, F.LAST_MODIFIED_BY FROM %sFILES F JOIN %sFILE_DATA_TYPES FT ON FT.FILE_TYPE_ID = F.FILE_TYPE_ID JOIN %sBLOCKS B ON B.BLOCK_ID = F.BLOCK_ID """ % ((self.owner,)*3)
def format(self, results):
    """
    _format_

    Return one {"id": ..., "fwjr_path": ...} dict per result row.
    """
    rows = DBFormatter.format(self, results)
    return [{"id": row[0], "fwjr_path": row[1]} for row in rows]
def format(self, results):
    """
    _format_

    Format the query results into a list of LFNs.
    """
    rows = DBFormatter.format(self, results)
    return [row[0] for row in rows]
def formatList(self, results):
    """
    _formatList_

    Return the site_name column of every row as a flat list.
    """
    return [entry['site_name'] for entry in DBFormatter.formatDict(self, results)]
def format(self, results):
    """
    _format_

    Return the couch document ID or None if one has not been set.
    """
    rows = DBFormatter.format(self, results)
    if rows:
        return rows[0][0]
    return None
def testBFormatting(self):
    """
    Test various formats
    """
    thread = threading.currentThread()
    fmt = DBFormatter(thread.logger, thread.dbi)
    thread.transaction.begin()

    # format: list of lists
    self.assertEqual(fmt.format(thread.transaction.processData(thread.select)),
                     [['value1a', 'value2a'],
                      ['value1b', 'value2b'],
                      ['value1c', 'value2d']])

    # formatOne: first row only
    firstRow = fmt.formatOne(thread.transaction.processData(thread.select))
    print('test1 ' + str(firstRow))
    self.assertEqual(firstRow, ['value1a', 'value2a'])

    # formatDict: list of dicts keyed by column name
    self.assertEqual(fmt.formatDict(thread.transaction.processData(thread.select)),
                     [{'bind2': 'value2a', 'bind1': 'value1a'},
                      {'bind2': 'value2b', 'bind1': 'value1b'},
                      {'bind2': 'value2d', 'bind1': 'value1c'}])

    # formatOneDict: first row as a dict
    self.assertEqual(fmt.formatOneDict(thread.transaction.processData(thread.select)),
                     {'bind2': 'value2a', 'bind1': 'value1a'})
def __init__(self, logger, dbi, owner):
    """
    Add schema owner and sql.

    :param owner: schema owner; "" or "__MYSQL__" means no schema prefix
    """
    DBFormatter.__init__(self, logger, dbi)
    # No schema prefix for MySQL or when the owner is unset.
    self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
    # Oracle multi-table INSERT ALL: creates the application executable,
    # release version and parameter set hash rows on demand, then always
    # inserts the output module config that references them.
    self.sql = \
        """INSERT ALL
           WHEN not exists(select app_exec_id from %sapplication_executables where app_name = app_n) THEN
           INTO %sapplication_executables(app_exec_id, app_name)values(%sseq_ae.nextval, app_n)
           WHEN not exists (select release_version_id from %srelease_versions where release_version = release_v) THEN
           INTO %srelease_versions(release_version_id, release_version) values (%sseq_rv.nextval, release_v)
           WHEN not exists(select parameter_set_hash_id from %sparameter_set_hashes where pset_hash = pset_h) THEN
           INTO %sparameter_set_hashes ( parameter_set_hash_id, pset_hash, pset_name )
           values (%sseq_psh.nextval, pset_h, pset_name)
           WHEN 1=1 THEN
           INTO %soutput_module_configs
           ( output_mod_config_id, app_exec_id, release_version_id, parameter_set_hash_id,
             output_module_label, global_tag, scenario, creation_date, create_by )
           values
           (%sseq_omc.nextval,
            NVL((select app_exec_id from %sapplication_executables where app_name = app_n),%sseq_ae.nextval),
            NVL((select release_version_id from %srelease_versions where release_version = release_v), %sseq_rv.nextval),
            NVL((select parameter_set_hash_id from %sparameter_set_hashes where pset_hash = pset_h), %sseq_psh.nextval),
            :output_module_label, :global_tag, :scenario, :creation_date, :create_by)
           select :app_name app_n, :release_version release_v,
                  :pset_hash pset_h, :pset_name pset_name
           from dual """ % ((self.owner,)*17)
def formatDict(self, results):
    """
    _formatDict_

    Cast the id column to an integer since formatDict() turns everything
    into strings.
    """
    rows = DBFormatter.formatDict(self, results)
    for row in rows:
        row["id"] = int(row["id"])
    return rows