def doesAlleleHaveAssays(alleleKey):
    """
    Return True if the allele (by _allele_key) has any expression assay
    data, i.e. one of its genotypes is attached to a gel lane or a
    specimen; False otherwise.
    """
    sql = '''
        select 1 where exists (
            select 1 from gxd_allelegenotype ag
            join gxd_gellane gl on ( gl._genotype_key=ag._genotype_key )
            where ag._allele_key=%d
        )
        or exists (
            select 1 from gxd_allelegenotype ag
            join gxd_specimen s on ( s._genotype_key=ag._genotype_key )
            where ag._allele_key=%d
        )
    ''' % (alleleKey, alleleKey)
    rows, _ = performQuery(sql)
    return bool(rows)
def processReportScript(text, kwargs=None):
    """
    Process a report script: *text* contains SQL statements and
    <code></code> tags. Executes each command in order and returns
    (results, columns) from the last SQL statement run.

    text -- the report script text
    kwargs -- optional substitution arguments applied to the text
    """
    # None-sentinel instead of a shared mutable default argument
    if kwargs is None:
        kwargs = {}
    results = []
    columns = []
    text = _replaceArgs(text, kwargs)
    commands = _getSqlAndCodeCommands(text)
    for command in commands:
        cmdScript = command['command']
        if command['type'] == SQL_TYPE:
            results, columns = performQuery(cmdScript)
            # materialize each row's values as a plain list
            # (py2 `r.values()` returned a list; py3 returns a view)
            results = [list(r.values()) for r in results]
        elif command['type'] == CODE_TYPE:
            # SECURITY: executes code embedded in the report script.
            # Only trusted, curator-authored scripts may be run here.
            # py3 function form, replacing the py2 `exec cmdScript` statement
            exec(cmdScript)
    return results, columns
def processReportScript(text, kwargs=None):
    """
    Process a report script: *text* contains SQL statements and
    <code></code> tags. Executes each command in order and returns
    (results, columns) from the last SQL statement run.

    text -- the report script text
    kwargs -- optional substitution arguments applied to the text
    """
    # None-sentinel instead of a shared mutable default argument
    if kwargs is None:
        kwargs = {}
    results = []
    columns = []
    text = _replaceArgs(text, kwargs)
    commands = _getSqlAndCodeCommands(text)
    for command in commands:
        cmdScript = command['command']
        if command['type'] == SQL_TYPE:
            results, columns = performQuery(cmdScript)
            # materialize each row's values as a plain list
            results = [list(r.values()) for r in results]
        elif command['type'] == CODE_TYPE:
            # SECURITY: executes code embedded in the report script.
            # Only trusted, curator-authored scripts may be run here.
            exec(cmdScript)
    return results, columns
def _querySortedHeaderMap(genotypeKeys):
    """
    Fetch the annotation headers for each genotype key.
    Returns {genotypeKey: [header terms, ordered by sequencenum]}.
    """
    headerMap = {}
    for chunk in batch_list(genotypeKeys, 100):
        inClause = ','.join(str(key) for key in chunk)
        sql = '''
            select vah._object_key, th.term as header, vah.sequencenum
            from voc_annotheader vah
            join voc_term th on (th._term_key=vah._term_key)
            where vah._object_key in (%s)
            order by vah._object_key, vah.sequencenum
        ''' % inClause
        rows, _ = performQuery(sql)
        # rows arrive pre-sorted by (object_key, sequencenum), so
        # appending preserves the header order per genotype
        for row in rows:
            headerMap.setdefault(row[0], []).append(row[1])
    return headerMap
def getChildProbe(key):
    """
    TR12006/add link to child probe.

    Returns '' when the probe has no derived (child) probes, otherwise
    the raw result rows of (name, mgiid) — note there may be more than
    one child probe.

    Would be best if this could be done via "db.relationship" in
    mgipython/model/mgd/prb.py, but could not find a way to do so.
    """
    sql = '''
        select p2.name, a.accid as mgiid
        from prb_probe p1, prb_probe p2, acc_accession a
        where p1._probe_key = %d
        and p1._probe_key = p2.derivedfrom
        and p2._probe_key = a._object_key
        and a._mgitype_key = 3
        and a.preferred = 1
        and a._logicaldb_key = 1
    ''' % (key)
    rows, _ = performQuery(sql)
    # NOTE: intentionally returns '' (not []) on no match; callers rely
    # on this mixed return type
    if not rows:
        return ''
    return rows
def normalize_sequencenums(self, _user_key):
    """
    Normalize the sequencenum fields in the MGI_SetMember table for the
    EMAPA clipboard set.

    Needed after any number of adds or deletes, which can leave holes in
    the sequencenum order: e.g. [1,2,5,9,10] becomes [1,2,3,4,5].
    """
    sql = """
        select MGI_resetSequenceNum(
            'MGI_SetMember',
            %d,
            %d
        )
    """ % (self.emapa_clipboard_set_key, _user_key)
    performQuery(sql)
def doesProbeHaveAssays(_probe_key):
    """
    Return True if the probe (by _probe_key) is referenced by any
    expression assay via a probe prep, False otherwise.
    """
    sql = '''
        select 1 from gxd_probeprep pp
        join gxd_assay a on a._probeprep_key=pp._probeprep_key
        where pp._probe_key = %d
    ''' % _probe_key
    rows, _ = performQuery(sql)
    return bool(rows)
def doesAlleleHavePheno(alleleKey):
    """
    Return True if the allele (by _allele_key) has any MP (phenotype)
    annotation data on one of its genotypes, False otherwise.
    """
    sql = '''
        select 1 where exists (
            select 1 from gxd_allelegenotype ag
            join voc_annot va on (
                va._object_key=ag._genotype_key
                and va._annottype_key=%d
            )
            where ag._allele_key=%d
        )
    ''' % (Allele._mp_annottype_key, alleleKey)
    rows, _ = performQuery(sql)
    return bool(rows)
def _queryGenotypeEdgeMap(genotypeKeys):
    """
    Fetch the DAG edges among the annotated MP terms of each genotype.
    Returns {genotypeKey: {parent _term_key: [child _term_key, ...]}}.
    """
    edgeMap = {}
    for chunk in batch_list(genotypeKeys, 100):
        inClause = ','.join(str(key) for key in chunk)
        sql = '''
            select child_annot._object_key genotype_key,
                dc._ancestorobject_key parent_key,
                dc._descendentobject_key child_key
            from voc_annot child_annot,
                voc_annot parent_annot
            join dag_closure dc on (
                _mgitype_key = %d
                and _ancestorobject_key = parent_annot._term_key
            )
            join voc_term pt on pt._term_key=dc._ancestorobject_key
            join voc_term ct on ct._term_key=dc._descendentobject_key
            where child_annot._object_key in (%s)
            and child_annot._annottype_key = %d
            and parent_annot._object_key = child_annot._object_key
            and parent_annot._annottype_key = child_annot._annottype_key
            and dc._descendentobject_key = child_annot._term_key
        ''' % (VocTerm._mgitype_key, inClause, Genotype._mp_annottype_key)
        rows, _ = performQuery(sql)
        for row in rows:
            genoKey, parentKey, childKey = row[0], row[1], row[2]
            parentMap = edgeMap.setdefault(genoKey, {})
            parentMap.setdefault(parentKey, []).append(childKey)
    return edgeMap
def _queryTermToHeaderMap(genotypeKeys):
    """
    Fetch the associations of MP header term to annotated term for the
    given genotype keys.
    Returns {genotypeKey: {annotated _term_key: [header terms]}}.
    """
    keyMap = {}
    for chunk in batch_list(genotypeKeys, 100):
        inClause = ','.join(str(key) for key in chunk)
        sql = '''
            select distinct va._object_key, va._term_key, th.term as header
            from dag_closure dc
            join voc_annotheader vah on (vah._term_key=dc._ancestorobject_key)
            join voc_term th on (th._term_key=vah._term_key)
            join voc_annot va on (va._term_key=dc._descendentobject_key
                and va._object_key=vah._object_key )
            where dc._mgitype_key=%d
            and va._object_key in (%s)
        ''' % (VocTerm._mgitype_key, inClause)
        rows, _ = performQuery(sql)
        for row in rows:
            genoKey, annotTermKey, headerTerm = row[0], row[1], row[2]
            headerMap = keyMap.setdefault(genoKey, {})
            headerMap.setdefault(annotTermKey, []).append(headerTerm)
    return keyMap
# configure the multiple db binds
# 'mgd' is for mgd
app.config['SQLALCHEMY_DATABASE_URI'] = dburi
app.config['SQLALCHEMY_BINDS'] = {
    "mgd": dburi,
}

# initialise the global db object
from mgipython import modelconfig
modelconfig.createDatabaseEngineFromApp(app, appCache=cache)
db = modelconfig.db
from mgipython.model.query import performQuery

# best-effort connectivity probe: a failure here is tolerated so the app
# can still start; catch Exception (not bare except) so SystemExit and
# KeyboardInterrupt are not swallowed
try:
    performQuery("select 1 from mgi_dbinfo")
except Exception:
    pass

# Set logging for pretty printed queries
from login.literalquery import literalquery
from datetime import datetime
import sqlparse
from sqlalchemy import event
from sqlalchemy.engine import Engine
from sqlalchemy.event import listens_for
from sqlalchemy.orm.query import Query
import traceback
import time
def searchTriageHistory(
    journal=None,
    selectedAreas=None,
    primaryCurators=None,
    selectedDate=None,
    order="date",
    limit=None
):
    """
    Get the triage history summary for the requested inputs.

    journal -- journal name filter (matched case-insensitively via like)
    selectedAreas -- list of area abbreviations to restrict to
    primaryCurators -- curator logins, mapped to their journals via
        PRIMARY_CURATOR_TO_JOURNAL_MAP
    selectedDate -- only include records modified after this date;
        defaults to the time of the call
    order -- sort order, either "date" or "journal"
    limit -- optional maximum number of rows

    Returns the packaged result rows.
    """
    # None-sentinel defaults: the old `selectedDate=datetime.now()` default
    # was evaluated once at import time (stale in a long-running server),
    # and list defaults are shared mutable objects
    if selectedAreas is None:
        selectedAreas = []
    if primaryCurators is None:
        primaryCurators = []
    if selectedDate is None:
        selectedDate = datetime.now()

    # SECURITY NOTE: the where clause is built by string interpolation;
    # journal/selectedAreas should be vetted upstream, or this query
    # should be moved to parameterized form
    whereClauses = ["bda.modification_date > '%s' " %
                    selectedDate.strftime("%Y-%m-%d")]

    if journal:
        journal = journal.lower()
        whereClauses.append("lower(journal) like '%s'" % journal)

    if primaryCurators:
        # map curators to their journals
        journals = []
        for login in primaryCurators:
            if login in PRIMARY_CURATOR_TO_JOURNAL_MAP:
                journals.extend(PRIMARY_CURATOR_TO_JOURNAL_MAP[login])
        journals = [j.lower() for j in journals]
        journals = list(set(journals))
        journalsClause = "(%s)" % ' OR '.join(
            ["lower(journal) like '%s'" % j for j in journals]
        )
        whereClauses.append(journalsClause)

    if selectedAreas:
        whereClauses.append("abbreviation in ('%s')" % "','".join(selectedAreas))

    whereClause = "where %s" % ' AND '.join(whereClauses)

    orderBy = "order by lastupdate desc"
    if order.lower() == "journal":
        orderBy = "order by journal, vol desc, issue desc"

    limitClause = ""
    if limit:
        limitClause = "limit %d" % limit

    # TODO(kstone): Find a way to use SQLAlchemy objects to gather this data efficiently
    queryTemplate = _getTriageHistoryQueryTemplate()
    query = queryTemplate % ({'where': whereClause,
                              'orderby': orderBy,
                              'limit': limitClause})
    results, cols = performQuery(query)

    results = _packageRows(results, cols)
    return results
# configure the multiple db binds
# 'mgd' is for mgd
app.config['SQLALCHEMY_DATABASE_URI'] = dburi
app.config['SQLALCHEMY_BINDS'] = {
    "mgd": dburi,
}

# initialise the global db object
from mgipython import modelconfig
modelconfig.createDatabaseEngineFromApp(app, appCache=cache)
db = modelconfig.db
from mgipython.model.query import performQuery

# best-effort connectivity probe: a failure here is tolerated so the app
# can still start; catch Exception (not bare except) so SystemExit and
# KeyboardInterrupt are not swallowed
try:
    performQuery("select 1 from mgi_dbinfo")
except Exception:
    pass

# Set logging for pretty printed queries
from login.literalquery import literalquery
from datetime import datetime
import sqlparse
from sqlalchemy import event
from sqlalchemy.engine import Engine
from sqlalchemy.event import listens_for
from sqlalchemy.orm.query import Query
import traceback
import time
import string