def update_models():
    """Update the list of modelnames in the model selector after
    an analysis is selected.
    """
    session = Session()
    Analysis = Tables()['Analysis']

    aSelected = request.args.get('aSelected', type=str)
    extraModels = request.args.get('extraModels', type=str)
    extraAnalyses = request.args.get('extraAnalyses', type=str)
    search = request.args.get('modelSearch', type=str)
    extraModels = [s.strip() for s in extraModels.split(',')]
    extraAnalyses = [s.strip() for s in extraAnalyses.split(',')]

    global _previous_update_models
    pA, xM, xA, pM = _previous_update_models
    if (aSelected == pA) and (extraModels == xM) and (extraAnalyses == xA):
        model_list = pM
    else:
        modeltree = (
                session.query(Analysis.modeltree)
                .filter(Analysis.name == aSelected)
                .first()
                )

        # Pass the modeltree string from Analysis to a ModelFinder
        # constructor, which uses a series of internal methods to convert
        # the tree string to a list of model names. Then add any
        # additional models specified in extraModels, plus the model
        # lists from extraAnalyses.
        if modeltree and modeltree[0]:
            model_list = _get_models(modeltree[0])
            extraModels = [m for m in extraModels
                           if (m not in model_list and m.strip() != '')]
            model_list.extend(extraModels)
            if extraAnalyses:
                analyses = (
                        session.query(Analysis.modeltree)
                        .filter(Analysis.name.in_(extraAnalyses))
                        .all()
                        )
                for t in [a.modeltree for a in analyses]:
                    extras = [m for m in _get_models(t)
                              if m not in model_list]
                    model_list.extend(extras)
            _previous_update_models = (aSelected, extraModels,
                                       extraAnalyses, model_list)
        else:
            _previous_update_models = ('', '', '', [])
            session.close()
            return jsonify(modellist="Model tree not found.")

    session.close()
    filtered_models = simple_search(search, model_list)

    return jsonify(modellist=filtered_models)
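# The _get_models helper and the module-level cache are not shown in this
# excerpt. Below is a minimal sketch of both, assuming _get_models mirrors
# the tree-expansion logic used in reload_models() further down (json-decode
# the stored [load, mod, fit] tree, expand each part with ModelFinder, and
# join the cartesian product with underscores). Only the cache's 4-tuple
# shape is evident from the code above; the initial values are assumptions.
import itertools
import json

from nems_web.utilities.ModelFinder import ModelFinder

# (last analysis, extra models, extra analyses, resulting model list)
_previous_update_models = ('', '', '', [])


def _get_models(modeltree):
    """Convert a stored modeltree string into a flat list of model names."""
    load, mod, fit = json.loads(modeltree)
    loaders = ModelFinder(load).modellist
    models = ModelFinder(mod).modellist
    fitters = ModelFinder(fit).modellist
    return ['_'.join(m) for m in itertools.product(loaders, models, fitters)]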
def batch_performance():
    session = Session()
    Results = Tables()['Results']

    cSelected = request.form['cSelected']
    # Only get the numerals for the selected batch, not the description.
    bSelected = request.form['bSelected'][:3]
    mSelected = request.form['mSelected']
    findAll = request.form['findAll']
    cSelected = cSelected.split(',')
    mSelected = mSelected.split(',')

    if int(findAll):
        results = psql.read_sql_query(
                session.query(
                        Results.cellid, Results.modelname, Results.r_test
                        )
                .filter(Results.batch == bSelected)
                .statement,
                session.bind
                )
    else:
        results = psql.read_sql_query(
                session.query(
                        Results.cellid, Results.modelname, Results.r_test
                        )
                .filter(Results.batch == bSelected)
                .filter(Results.cellid.in_(cSelected))
                .filter(Results.modelname.in_(mSelected))
                .statement,
                session.bind
                )

    # Get back the list of models that matched the other query criteria.
    results_models = list(set(results['modelname'].values.tolist()))
    # Filter mSelected to match the results models, so that the list is
    # in the same order as on the web UI.
    ordered_models = [m for m in mSelected if m in results_models]

    report = Performance_Report(results, bSelected, ordered_models)
    report.generate_plot()
    session.close()

    return render_template(
            'batch_performance.html', script=report.script, div=report.div,
            bokeh_version=bokeh_version,
            )
def load_user(user_id):
    session = Session()
    Users = Tables()['Users']
    try:
        # Get email match from user database
        # (needs to be stored as unicode per flask-login).
        sqla_user = (
                session.query(Users)
                .filter(Users.email == user_id)
                .first()
                )
        if not sqla_user:
            sqla_user = BlankUser()
        # Assign attrs from the table object to the active user instance.
        user = User(
                username=sqla_user.username,
                # Password should be stored as a bcrypt hash
                # (generated when registered).
                password=sqla_user.password,
                labgroup=sqla_user.labgroup,
                sec_lvl=sqla_user.sec_lvl,
                )
        return user
    except Exception as e:
        log.exception(e)
        return None
    finally:
        session.close()
def get_current_analysis():
    """Populate the Analysis Editor form with the database contents
    for the currently selected analysis.
    """
    session = Session()
    Analysis = Tables()['Analysis']
    aSelected = request.args.get('aSelected')

    # If no analysis was selected, fill fields with blank text to
    # mimic 'New Analysis' behavior.
    if not aSelected:
        session.close()
        return jsonify(
                name='', status='', tags='', question='',
                answer='', tree='',
                )

    a = (
            session.query(Analysis)
            .filter(Analysis.id == aSelected)
            .first()
            )
    load, mod, fit = _get_trees(a.modeltree)
    session.close()

    return jsonify(
            id=a.id, name=a.name, status=a.status, tags=a.tags,
            question=a.question, answer=a.answer,
            load=load, mod=mod, fit=fit,
            )
def get_plot(cells, models, batch, measure, plot_type, only_fair=True,
             include_outliers=False, display=True):
    session = Session()
    Results = Tables()['Results']
    results_df = psql.read_sql_query(
            session.query(Results)
            .filter(Results.batch == batch)
            .filter(Results.cellid.in_(cells))
            .filter(Results.modelname.in_(models))
            .statement,
            session.bind
            )
    # Filter the selected models down to those present in the results,
    # preserving the order from the UI selection.
    results_models = list(set(results_df['modelname'].values.tolist()))
    ordered_models = [m for m in models if m in results_models]
    PlotClass = getattr(plots, plot_type)
    plot = PlotClass(
            data=results_df, measure=measure, models=ordered_models,
            fair=only_fair, outliers=include_outliers, display=display
            )
    session.close()

    return plot
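# Hypothetical usage sketch for get_plot. 'Scatter' is an assumed class
# name in the plots module (the available PlotClass names are not shown in
# this excerpt), and the cellid/modelname values are placeholders.
if __name__ == '__main__':
    example_plot = get_plot(
            cells=['TAR010c-18-1'],
            models=['parm100pt_wcg02_fir15_fit01'],
            batch='303',
            measure='r_test',
            plot_type='Scatter',
            )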
def delete_analysis():
    """Delete the selected analysis from the database."""
    user = get_current_user()
    session = Session()
    Analysis = Tables()['Analysis']
    success = False

    aSelected = request.args.get('aSelected')
    if not aSelected:
        session.close()
        return jsonify(success=success)

    result = (
            session.query(Analysis)
            .filter(Analysis.id == aSelected)
            .first()
            )
    if result is None:
        session.close()
        return jsonify(success=success)

    if (result.public or (result.username == user.username)
            or (user.labgroup in result.labgroup)):
        success = True
        session.delete(result)
        session.commit()
    else:
        log.info("You do not have permission to delete this analysis.")

    session.close()
    return jsonify(success=success)
def cell_details(cellid):
    # Just redirects to the celldb penetration info for now.
    # Keeping as a separate function in case we want to add extra stuff
    # later (as opposed to just putting the link directly in javascript).
    session = Session()
    gCellMaster = Tables()['gCellMaster']
    url_root = 'http://hyrax.ohsu.edu/celldb/peninfo.php?penid='
    # Strip the unit suffix from the cellid.
    i = cellid.find('-')
    cellid = cellid[:i]
    try:
        result = (
                session.query(gCellMaster)
                .filter(gCellMaster.cellid == cellid)
                .first()
                )
    except Exception as e:
        print(e)
        session.close()
        return Response("Couldn't open cell file -- "
                        "this option is disabled for non-LBHB setup.")
    if not result:
        # Return an error response instead? Or make it a dud link?
        # Seems wasteful to refresh the page; could also redirect to
        # the celldb home page.
        session.close()
        return redirect(url_for('main_view'))
    penid = result.penid
    session.close()

    return redirect(url_root + str(penid))
def update_tag_options():
    user = get_current_user()
    session = Session()
    Analysis = Tables()['Analysis']
    tags = [
            i[0].split(",") for i in
            session.query(Analysis.tags)
            .filter(or_(
                    Analysis.public == '1',
                    Analysis.labgroup.ilike('%{0}%'.format(user.labgroup)),
                    Analysis.username == user.username,
                    ))
            .distinct().all()
            ]
    # Flatten the list of lists into a single list of all tag strings
    # and remove leading and trailing whitespace.
    taglistbldupspc = [i for sublist in tags for i in sublist]
    taglistbldup = [t.strip() for t in taglistbldupspc]
    # Re-form the list with only unique tags.
    taglistbl = list(set(taglistbldup))
    # Finally, remove any blank tags and sort the list.
    taglist = [t for t in taglistbl if t != '']
    taglist.sort()
    session.close()

    return jsonify(taglist=taglist)
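# The flatten/strip/dedupe/sort sequence above could be collapsed into one
# set comprehension; an equivalent, behavior-preserving sketch:
#
#     taglist = sorted({t.strip() for sub in tags for t in sub if t.strip()})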
def check_analysis_exists():
    """Check for a duplicate analysis name when an Analysis Editor form
    is submitted. If a duplicate exists, warn the user before overwriting.
    """
    session = Session()
    Analysis = Tables()['Analysis']
    nameEntered = request.args.get('nameEntered')
    analysisId = request.args.get('analysisId')
    exists = False
    result = (
            session.query(Analysis)
            .filter(Analysis.name == nameEntered)
            .first()
            )
    # Only set to True if the id is different, so that overwriting
    # one's own analysis doesn't raise the flag.
    if result and (
            analysisId == '__none'
            or (int(result.id) != int(analysisId))
            ):
        exists = True
    session.close()

    return jsonify(exists=exists)
def get_id(self):
    # user_id should be the unicode rep of the e-mail address
    # (stored in the db table in that format).
    session = Session()
    Users = Tables()['Users']
    row = (
            session.query(Users.email)
            .filter(Users.username == self.username)
            .first()
            )
    session.close()
    # .first() returns a row tuple; unpack it so flask-login gets the
    # plain e-mail string.
    return row.email if row else None
def __call__(self, form, field):
    session = Session()
    Users = Tables()['Users']
    exists = (
            session.query(Users)
            .filter(Users.email == field.data)
            .first()
            )
    if exists:
        session.close()
        raise ValidationError(self.message)
    session.close()
def update_analysis():
    """Update the list of analyses after a tag and/or filter selection
    changes.
    """
    user = get_current_user()
    session = Session()
    Analysis = Tables()['Analysis']
    tagSelected = request.args.getlist('tagSelected[]')
    statSelected = request.args.getlist('statSelected[]')

    global _previous_update_analysis
    previous_tags, previous_stats, previous_analyses, previous_ids = \
        _previous_update_analysis
    if (previous_tags == tagSelected) and (previous_stats == statSelected):
        analysislist = previous_analyses
        analysis_ids = previous_ids
    else:
        # If the special '__any' value is passed, set tag and status to
        # match any string in the ilike query.
        if '__any' in tagSelected:
            tagStrings = [Analysis.tags.ilike('%%')]
        else:
            tagStrings = [
                    Analysis.tags.ilike('%{0}%'.format(tag))
                    for tag in tagSelected
                    ]
        if '__any' in statSelected:
            statStrings = [Analysis.status.ilike('%%')]
        else:
            statStrings = [
                    Analysis.status.ilike('%{0}%'.format(stat))
                    for stat in statSelected
                    ]
        analyses = (
                session.query(Analysis)
                .filter(or_(*tagStrings))
                .filter(or_(*statStrings))
                .filter(or_(
                        int(user.sec_lvl) == 9,
                        Analysis.public == '1',
                        Analysis.labgroup.ilike(
                                '%{0}%'.format(user.labgroup)),
                        Analysis.username == user.username,
                        ))
                .order_by(asc(Analysis.id))
                .all()
                )
        analysislist = [a.name for a in analyses]
        analysis_ids = [a.id for a in analyses]
        # Cache the result so repeated requests with the same selections
        # can skip the query.
        _previous_update_analysis = (tagSelected, statSelected,
                                     analysislist, analysis_ids)
    session.close()

    return jsonify(analysislist=analysislist, analysis_ids=analysis_ids)
def reload_models(self):
    from nems_web.utilities.ModelFinder import ModelFinder

    t = self.comboBatch.currentText()
    session = Session()
    Analysis = Tables()['Analysis']
    modeltree = (
            session.query(Analysis.modeltree)
            .filter(Analysis.name == self.current_analysis)
            .first()
            )
    modelextras = (
            session.query(Analysis.model_extras)
            .filter(Analysis.name == self.current_analysis)
            .first()
            )
    session.close()

    # Pass the modeltree string from Analysis to a ModelFinder
    # constructor, which uses a series of internal methods to convert
    # the tree string to a list of model names, then append any
    # additional models listed in model_extras.
    if modeltree and modeltree[0]:
        #model_list = _get_models(modeltree[0])
        load, mod, fit = json.loads(modeltree[0])
        loader = ModelFinder(load).modellist
        model = ModelFinder(mod).modellist
        fitter = ModelFinder(fit).modellist
        combined = itertools.product(loader, model, fitter)
        model_list = ['_'.join(m) for m in combined]
        extraModels = [
                s.strip("\"\n").replace("\\n", "")
                for s in modelextras[0].split(',')
                ]
        model_list.extend(extraModels)
    else:
        model_list = []
    self.all_models = model_list
    # Alternative: pull the distinct modelnames straight from Results.
    #sql = f"SELECT DISTINCT modelname FROM Results WHERE batch={t}"
    #data = nd.pd_query(sql)
    #self.all_models = data['modelname'].to_list()

    sql = f"SELECT DISTINCT cellid FROM Batches WHERE batch={t}"
    data = nd.pd_query(sql)
    self.all_cellids = data['cellid'].to_list()
    self.lastbatch = t
    self.labelBatchName.setText(str(t))
def check_instance_count():
    session = Session()
    # TODO: How to check which jobs are currently running/waiting?
    num_jobs = (
            session.query(tQueue)
            .filter(tQueue.progress == 'something')
            .count()
            )
    ec2 = boto3.resource('ec2')
    instances = ec2.instances.filter(Filters=[{
            'Name': 'instance-state-name',
            'Values': ['running'],
            }])
    # The instance collection has no len(); materialize it first.
    run_count = max(len(list(instances)), 1)  # avoid divide-by-zero

    if num_jobs / run_count > awsc.JOBS_PER_INSTANCE:
        # TODO: Where to get image id?
        ec2.create_instances(ImageId='<ami-image-id>', MinCount=1,
                             MaxCount=5)
    elif num_jobs / run_count - 1 < awsc.JOBS_PER_INSTANCE:
        ids = (
                session.query(tComputer)
                .filter(not_(tComputer.name.in_(awsc.LOCAL_MACHINES)))
                )
        # Could also filter based on this criteria in the query instead.
        # TODO: placeholder -- remove ids based on some criteria;
        #       don't stop them all.
        ec2.instances.filter(InstanceIds=ids).stop()
        ec2.instances.filter(InstanceIds=ids).terminate()
    session.close()
def get_preview():
    """Queries the database for the filepath to the preview image for
    the selected cell, batch and model combination(s).
    """
    session = Session()
    Results = Tables()['Results']
    # Only get the numerals for the selected batch, not the description.
    bSelected = request.args.get('bSelected', type=str)[:3]
    cSelected = request.args.getlist('cSelected[]')
    mSelected = request.args.getlist('mSelected[]')
    figurefile = None
    path = (
            session.query(Results)
            .filter(Results.batch == bSelected)
            .filter(Results.cellid.in_(cSelected))
            .filter(Results.modelname.in_(mSelected))
            .first()
            )
    if not path:
        session.close()
        return jsonify(image='missing preview')
    else:
        figurefile = str(path.figurefile)
        session.close()

    # Temporary compatibility hack to convert s3://... to https://
    if figurefile.startswith('s3'):
        prefix = 'https://s3-us-west-2.amazonaws.com'
        parsed = urlparse(figurefile)
        bucket = parsed.netloc
        path = parsed.path  # includes the leading '/'
        figurefile = prefix + '/' + bucket + path

    image_bytes = load_resource(figurefile)
    b64img = str(b64encode(image_bytes))[2:-1]
    return jsonify(image=b64img)
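# Worked example of the s3-to-https conversion above, with a hypothetical
# bucket and key:
#
#     's3://nems-figures/303/preview.png'
#         -> 'https://s3-us-west-2.amazonaws.com/nems-figures/303/preview.png'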
def get_saved_selections():
    session = Session()
    Users = Tables()['Users']
    user = get_current_user()
    user_entry = (
            session.query(Users)
            .filter(Users.username == user.username)
            .first()
            )
    if not user_entry:
        session.close()
        return jsonify(response="user not logged in, can't load selections")
    selections = user_entry.selections
    null = False
    if not selections:
        null = True
    session.close()
    return jsonify(selections=selections, null=null)
def get_filtered_cells(cells, batch, snr=0.0, iso=0.0, snr_idx=0.0):
    """Removes cellids from the list if they do not meet the snr/iso
    criteria.
    """
    session = Session()
    NarfBatches = Tables()['NarfBatches']

    snr = max(snr, 0)
    iso = max(iso, 0)
    snr_idx = max(snr_idx, 0)

    db_criteria = psql.read_sql_query(
            session.query(NarfBatches)
            .filter(NarfBatches.cellid.in_(cells))
            .statement,
            session.bind
            )
    session.close()
    if db_criteria.empty:
        log.warning("No matching cells found in NarfBatches,"
                    " no cellids were filtered.")
        return cells
    else:
        def filter_cells(cell):
            min_snr = min(cell.est_snr, cell.val_snr)
            min_isolation = cell.min_isolation
            min_snr_idx = cell.min_snr_index
            a = (snr > min_snr)
            b = (iso > min_isolation)
            c = (snr_idx > min_snr_idx)
            if a or b or c:
                try:
                    cells.remove(cell.cellid)
                except ValueError:
                    # Cell already removed -- necessary b/c pandas tries
                    # the function twice on the first row, which causes
                    # an error since our applied function has
                    # side-effects.
                    pass

        # Applied purely for its side-effects on `cells`.
        db_criteria.apply(filter_cells, axis=1)
        return cells
def update_status_options():
    user = get_current_user()
    session = Session()
    Analysis = Tables()['Analysis']
    statuslist = [
            i[0] for i in
            session.query(Analysis.status)
            .filter(or_(
                    Analysis.public == '1',
                    Analysis.labgroup.ilike('%{0}%'.format(user.labgroup)),
                    Analysis.username == user.username,
                    ))
            .distinct().all()
            ]
    session.close()

    return jsonify(statuslist=statuslist)
def get_filtered_cells(cells, batch, snr=0.0, iso=0.0, snr_idx=0.0):
    """Removes cellids from the list if they do not meet the snr/iso
    criteria.
    """
    session = Session()
    Batches = Tables()['Batches']

    snr = max(snr, 0)
    iso = max(iso, 0)
    snr_idx = max(snr_idx, 0)

    db_criteria = psql.read_sql_query(
            session.query(Batches)
            .filter(Batches.cellid.in_(cells))
            .filter(Batches.min_snr_index >= snr_idx)
            .filter(Batches.min_isolation >= iso)
            .filter(Batches.est_snr >= snr)
            .filter(Batches.val_snr >= snr)
            .statement,
            session.bind
            )
    session.close()

    return list(set(db_criteria['cellid'].values.tolist()))
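# Hypothetical usage sketch (threshold values are placeholders):
#
#     good_cells = get_filtered_cells(cell_list, batch=303,
#                                     snr=0.2, iso=75.0, snr_idx=0.1)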
def set_saved_selections():
    user = get_current_user()
    if not user.username:
        return jsonify(
                response="user not logged in, can't save selections",
                null=True,
                )
    session = Session()
    Users = Tables()['Users']

    saved_selections = request.args.get('stringed_selections')
    user_entry = (
            session.query(Users)
            .filter(Users.username == user.username)
            .first()
            )
    user_entry.selections = saved_selections
    session.commit()
    session.close()

    return jsonify(response='selections saved', null=False)
def update_batch():
    """Update the current batch selection after an analysis is selected."""
    session = Session()
    Analysis = Tables()['Analysis']
    blank = 0
    aSelected = request.args.get('aSelected', type=str)

    batch = (
            session.query(Analysis.batch)
            .filter(Analysis.name == aSelected)
            .first()
            )
    try:
        batch = batch.batch
    except Exception as e:
        log.info(e)
        batch = ''
        blank = 1
    session.close()

    return jsonify(batch=batch, blank=blank)
def form_data_array(
        session, batch, cells, models, columns=None, only_fair=True,
        include_outliers=False,
        ):
    # TODO: figure out a good way to form this from the existing
    #       dataframe instead of making a new one then copying over.
    #       Should be able to just re-index then apply some lambda
    #       function over the vectorized dataframe for filtering?
    # Note: opens its own session; the session argument is currently
    # unused.
    session = Session()
    Results = Tables()['Results']
    data = psql.read_sql_query(
            session.query(Results)
            .filter(Results.batch == batch)
            .filter(Results.cellid.in_(cells))
            .filter(Results.modelname.in_(models))
            .statement,
            session.bind
            )
    if not columns:
        columns = data.columns.values.tolist()
    multiIndex = pd.MultiIndex.from_product(
            [cells, models], names=['cellid', 'modelname'],
            )
    newData = pd.DataFrame(index=multiIndex, columns=columns)
    # sort_index returns a new frame, so re-assign the result.
    newData = newData.sort_index()

    # Columns that don't contain performance data -- these are excluded
    # from the outlier checks.
    non_comp_columns = [
            'id', 'cellid', 'modelname', 'batch', 'n_parms', 'figurefile',
            'githash', 'lastmod', 'score', 'sparsity', 'modelpath',
            'modelfile', 'username', 'labgroup', 'public',
            ]
    for c in cells:
        for m in models:
            dataRow = data.loc[(data.cellid == c) & (data.modelname == m)]
            for col in columns:
                # If col is in the non_comp list, just copy the value if
                # it exists, then move on to the next iteration.
                if col in non_comp_columns:
                    try:
                        newData[col].loc[c, m] = \
                            dataRow[col].values.tolist()[0]
                    except Exception:
                        newData[col].loc[c, m] = np.nan
                    continue

                # If the loop hits a continue below, the value is left as
                # NaN. Otherwise, it is assigned a value from data after
                # passing all of the checks.
                value = np.nan
                newData[col].loc[c, m] = value
                try:
                    value = dataRow[col].values.tolist()[0]
                except Exception:
                    # Error should mean no value was recorded, so leave
                    # as NaN. No need to run the outlier checks on a
                    # missing value.
                    print("No %s recorded for %s,%s" % (col, m, c))
                    continue

                if not include_outliers:
                    # If outliers is false, run a series of checks based
                    # on the measure; if a check fails, step out of the
                    # loop. Comments for each check are copied from
                    # _Analysis : compute_data_matrix.

                    # "Drop r_test values below threshold"
                    a1 = (col == 'r_test')
                    b1 = (value < dataRow['r_floor'].values.tolist()[0])
                    a2 = (col == 'r_ceiling')
                    b2 = (dataRow['r_test'].values.tolist()[0]
                          < dataRow['r_floor'].values.tolist()[0])
                    a3 = (col == 'r_floor')
                    b3 = b1
                    if (a1 and b1) or (a2 and b2) or (a3 and b3):
                        continue

                    # "Drop MI values greater than 1"
                    a1 = (col == 'mi_test')
                    b1 = (value > 1)
                    a2 = (col == 'mi_fit')
                    b2 = (0 <= value <= 1)
                    if (a1 and b1) or (a2 and not b2):
                        continue

                    # "Drop MSE values greater than 1.1"
                    a1 = (col == 'mse_test')
                    b1 = (value > 1.1)
                    a2 = (col == 'mse_fit')
                    b2 = b1
                    if (a1 and b1) or (a2 and b2):
                        continue

                    # "Drop NLOGL outside normalized region"
                    a1 = (col == 'nlogl_test')
                    b1 = not (-1 <= value <= 0)
                    a2 = (col == 'nlogl_fit')
                    b2 = b1
                    if (a1 and b1) or (a2 and b2):
                        continue

                    # TODO: is this still used? Not listed in Results.
                    # "Drop gamma values that are too low"
                    a1 = (col == 'gamma_test')
                    b1 = (value < 0.15)
                    a2 = (col == 'gamma_fit')
                    b2 = b1
                    if (a1 and b1) or (a2 and b2):
                        continue

                    # TODO: is an outlier check needed for cohere_test
                    #       and/or cohere_fit?

                # If the value existed and passed the outlier checks,
                # re-assign it to the proper DataFrame position to
                # overwrite the NaN value.
                newData[col].loc[c, m] = value

    if only_fair:
        # If fair is checked, drop every cell that has an all-NaN row
        # for any model.
        for c in cells:
            for m in models:
                if newData.loc[c, m].isnull().values.all():
                    newData.drop(c, level='cellid', inplace=True)
                    break

    # Swap the 0th and 1st levels so that modelname is the primary index,
    # since most plots group by model.
    newData = newData.swaplevel(i=0, j=1, axis=0)
    return newData
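# A sketch of an equivalent vectorized form of the "fair" filter above,
# assuming newData is indexed by (cellid, modelname) as constructed in
# form_data_array:
#
#     all_null = newData.isnull().all(axis=1)
#     bad_cells = all_null.groupby(level='cellid').any()
#     newData = newData.drop(bad_cells[bad_cells].index, level='cellid')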
from nems.db import NarfResults, Session

import matplotlib.pyplot as plt
import numpy as np
import pandas.io.sql as psql

batch = 303
modelnames = [
        'parm100pt_wcg02_fir15_pupgainctl_fit01_nested5',
        'parm100pt_wcg02_fir15_behgain_fit01_nested5',
        'parm100pt_wcg02_fir15_pupgain_fit01_nested5',
        'parm100pt_wcg02_fir15_stategain_fit01_nested5',
        ]

session = Session()
results = psql.read_sql_query(
        session.query(NarfResults)
        .filter(NarfResults.batch == batch)
        .filter(NarfResults.modelname.in_(modelnames))
        .statement,
        session.bind
        )
session.close()
results.head()

mapping = {modelname: i for i, modelname in enumerate(modelnames)}
results['modelindex'] = results.modelname.map(mapping)

# quick plot:
#results.set_index(['modelindex', 'cellid'])['r_test'] \
#    .unstack('modelindex').T.plot().legend(bbox_to_anchor=(1, 1))
r_test = (results.set_index(['modelindex', 'cellid'])['r_test']
          .unstack('modelindex'))
#plt.plot(r_test)
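# A minimal follow-up sketch: scatter one model's r_test against another.
# Assumes model indices 0 and 1 (the pupgainctl and behgain models above)
# both have results for the plotted cells.
plt.scatter(r_test[0], r_test[1], s=10)
plt.plot([0, 1], [0, 1], 'k--', linewidth=1)  # unity line
plt.xlabel(modelnames[0])
plt.ylabel(modelnames[1])
plt.show()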
def fit_report():
    session = Session()
    db_tables = Tables()
    tQueue = db_tables['tQueue']
    Results = db_tables['Results']

    cSelected = request.args.getlist('cSelected[]')
    # Only get the numerals for the selected batch, not the description.
    bSelected = request.args.get('bSelected')[:3]
    mSelected = request.args.getlist('mSelected[]')

    multi_index = pd.MultiIndex.from_product(
            [mSelected, cSelected], names=['modelname', 'cellid']
            )
    status = pd.DataFrame(index=multi_index, columns=['yn'])

    tuples = list(itertools.product(cSelected, [bSelected], mSelected))
    notes = ['{0}/{1}/{2}'.format(t[0], t[1], t[2]) for t in tuples]

    qdata = psql.read_sql_query(
            session.query(tQueue)
            .filter(tQueue.note.in_(notes))
            .statement,
            session.bind,
            )
    results = psql.read_sql_query(
            session.query(Results.cellid, Results.batch, Results.modelname)
            .filter(Results.batch == bSelected)
            .filter(Results.cellid.in_(cSelected))
            .filter(Results.modelname.in_(mSelected))
            .statement,
            session.bind
            )

    for i, t in enumerate(tuples):
        yn = 0.3  # missing
        try:
            complete = qdata.loc[qdata['note'] == notes[i],
                                 'complete'].iloc[0]
            if complete < 0:
                yn = 0.4  # in progress
            elif complete == 0:
                yn = 0.5  # not started
            elif complete == 1:
                yn = 0.6  # finished
            elif complete == 2:
                yn = 0  # dead entry
            else:
                pass  # unknown value, so leave as missing?
        except IndexError:
            # No queue entry; check for a finished result instead.
            try:
                results.loc[
                        (results['cellid'] == t[0])
                        & (results['batch'] == int(t[1]))
                        & (results['modelname'] == t[2]),
                        'cellid'
                        ].iloc[0]
                yn = 0.6
            except IndexError:
                pass
        status['yn'].loc[t[2], t[0]] = yn

    status.reset_index(inplace=True)
    status = status.pivot(index='cellid', columns='modelname', values='yn')
    status = status[status.columns].astype(float)

    report = Fit_Report(status)
    report.generate_plot()
    session.close()

    image = str(b64encode(report.img_str))[2:-1]
    return jsonify(image=image)
def update_cells():
    """Update the list of cells in the cell selector after a batch is
    selected (this will cascade from an analysis selection). Also updates
    the current batch in Analysis for the current analysis.
    """
    session = Session()
    db_tables = Tables()
    Batches = db_tables['Batches']
    sBatch = db_tables['sBatch']
    Analysis = db_tables['Analysis']

    # Only get the numerals for the selected batch, not the description.
    bSelected = request.args.get('bSelected')
    aSelected = request.args.get('aSelected')
    search = request.args.get('cellSearch')

    global _previous_update_cells
    previous_batch, previous_celllist = _previous_update_cells
    if bSelected == previous_batch:
        celllist = previous_celllist
    else:
        celllist = [
                i[0] for i in
                session.query(Batches.cellid)
                .filter(Batches.batch == bSelected[:3])
                .all()
                ]
        _previous_update_cells = (bSelected, celllist)

    batchname = (
            session.query(sBatch)
            .filter(sBatch.id == bSelected[:3])
            .first()
            )
    if batchname:
        batch = str(bSelected[:3] + ': ' + batchname.name)
    else:
        batch = bSelected
    analysis = (
            session.query(Analysis)
            .filter(Analysis.name == aSelected)
            .first()
            )
    # Don't change the batch association if the batch is blank.
    if analysis and bSelected:
        analysis.batch = batch
        session.commit()
    session.close()

    # Remove pairs.
    filtered_cellids = [c for c in celllist if '+' not in c]
    # Remove siteids.
    filtered_cellids = [c for c in filtered_cellids if '-' in c]
    # Filter by the search string.
    filtered_cellids = simple_search(search, filtered_cellids)

    return jsonify(celllist=filtered_cellids)
def edit_analysis():
    """Take input from the Analysis Editor modal and save it to the
    database.

    Button: Edit Analysis
    """
    user = get_current_user()
    session = Session()
    Analysis = Tables()['Analysis']
    modTime = datetime.datetime.now().replace(microsecond=0)

    eName = request.args.get('name')
    eId = request.args.get('id')
    eStatus = request.args.get('status')
    eTags = request.args.get('tags')
    eQuestion = request.args.get('question')
    eAnswer = request.args.get('answer')
    eLoad = request.args.get('load')
    eMod = request.args.get('mod')
    eFit = request.args.get('fit')
    eTree = json.dumps([eLoad, eMod, eFit])

    if eId == '__none':
        checkExists = False
    else:
        checkExists = (
                session.query(Analysis)
                .filter(Analysis.id == eId)
                .first()
                )

    if checkExists:
        a = checkExists
        if (
                a.public
                or (user.labgroup in a.labgroup)
                or (a.username == user.username)
                ):
            a.name = eName
            a.status = eStatus
            a.question = eQuestion
            a.answer = eAnswer
            a.tags = eTags
            # Some schema versions store lastmod as a string.
            try:
                a.lastmod = modTime
            except Exception:
                a.lastmod = str(modTime)
            a.modeltree = eTree
        else:
            log.info("You do not have permission to modify this analysis.")
            session.close()
            return jsonify(success="failed")
    # If it doesn't exist, add a new sqlalchemy object with the
    # appropriate attributes, which should get assigned a new id.
    else:
        # TODO: Currently copies the user's labgroup by default.
        #       Is that the behavior we want?
        try:
            a = Analysis(
                    name=eName, status=eStatus, question=eQuestion,
                    answer=eAnswer, tags=eTags, batch='',
                    lastmod=modTime, modeltree=eTree,
                    username=user.username, labgroup=user.labgroup,
                    public='0',
                    )
        except Exception:
            a = Analysis(
                    name=eName, status=eStatus, question=eQuestion,
                    answer=eAnswer, tags=eTags, batch='',
                    lastmod=str(modTime), modeltree=eTree,
                    username=user.username, labgroup=user.labgroup,
                    public='0',
                    )
        session.add(a)

    addedName = a.name
    session.commit()
    session.close()

    # Clear out the cached analysis selection; otherwise the simple
    # caching logic thinks the analysis selection didn't change and will
    # load the previous modeltree.
    global _previous_update_models
    _previous_update_models = ('', '', '', [])

    # After handling submissions, return the user to the main page so it
    # refreshes with the new analysis included in the list.
    return jsonify(success="Analysis %s saved successfully." % addedName)
def filter_cells(session, batch, cells, min_snr=0, min_iso=0, min_snri=0):
    """Return a list of cells that don't meet the minimum snr/iso/snri
    criteria specified. The calling function can then remove them from
    the cell list if desired (ex: for cell in bad_cells:
    cell_list.remove(cell)).

    Arguments:
    ----------
    session : object
        An open database session object for querying Batches.
    batch : int
        The batch number to query.
    cells : list
        A list of the cellids to be checked.
    min_snr : float
        The minimum signal-to-noise ratio desired.
    min_iso : float
        The minimum isolation value desired.
    min_snri : float
        The minimum signal-to-noise ratio index desired.
    """
    bad_cells = []
    # Note: re-opened here; the passed-in session is currently unused.
    session = Session()
    Batches = Tables()['Batches']
    for cellid in cells:
        dbCriteria = (
                session.query(Batches)
                .filter(Batches.batch == batch)
                .filter(Batches.cellid.ilike(cellid))
                .first()
                )
        if dbCriteria:
            db_snr = min(dbCriteria.est_snr, dbCriteria.val_snr)
            db_iso = dbCriteria.min_isolation
            db_snri = dbCriteria.min_snr_index

            a = (min_snr > db_snr)
            b = (min_iso > db_iso)
            c = (min_snri > db_snri)

            if a or b or c:
                bad_cells.append(cellid)
                # Uncomment the section below for verbose output of why
                # individual cells were 'bad'.
                #filterReason = ""
                #if a:
                #    filterReason += (
                #            "min snr: %s -- was less than criteria: %s\n"
                #            % (db_snr, min_snr)
                #            )
                #if b:
                #    filterReason += (
                #            "min iso: %s -- was less than criteria: %s\n"
                #            % (db_iso, min_iso)
                #            )
                #if c:
                #    filterReason += (
                #            "min snr index: %s -- was less than"
                #            " criteria: %s\n"
                #            % (db_snri, min_snri)
                #            )
                #print(
                #        "Removing cellid: %s,\n"
                #        "because: %s"
                #        % (cellid, filterReason)
                #        )
        else:
            print("No entry in Batches for cellid: {0} in batch: {1}"
                  .format(cellid, batch))
            bad_cells.append(cellid)
    print("Number of bad cells due to snr/iso criteria: {0}"
          .format(len(bad_cells)))
    print("Out of total cell count: {0}".format(len(cells)))

    return bad_cells
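# Hypothetical usage per the docstring above; the batch number, thresholds,
# and cell list are placeholders.
if __name__ == '__main__':
    session = Session()
    cell_list = ['TAR010c-18-1', 'TAR010c-21-2']
    bad_cells = filter_cells(session, 303, cell_list,
                             min_snr=0.2, min_iso=75, min_snri=0.1)
    for cell in bad_cells:
        cell_list.remove(cell)
    session.close()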
def main_view():
    """Initialize the nems_analysis landing page.

    Queries the database to get lists of available analyses, batches,
    status filters, tag filters, and results columns. Specifies defaults
    for results columns, row limit and sort column.

    Returns:
    --------
    main.html : template
        The landing page template rendered with variables for
        analysislist, batchlist, collist, defaultcols, measurelist,
        defaultrowlimit, sortlist, defaultsort, statuslist, and taglist.
    """
    # TODO: figure out how to integrate sec_lvl/superuser mode.
    #       Maybe need to add a sec_lvl column to analysis/batches/results?
    #       Then can compare in query, ex: user.sec_lvl > analysis.sec_lvl
    user = get_current_user()
    session = Session()
    db_tables = Tables()
    Results = db_tables['Results']
    Analysis = db_tables['Analysis']
    Batches = db_tables['Batches']
    sBatch = db_tables['sBatch']

    # .all() returns a list of tuples, so it's necessary to pull the
    # name elements out into a list by themselves.
    analyses = (
            session.query(Analysis)
            .filter(or_(
                    int(user.sec_lvl) == 9,
                    Analysis.public == '1',
                    Analysis.labgroup.ilike('%{0}%'.format(user.labgroup)),
                    Analysis.username == user.username,
                    ))
            .order_by(asc(Analysis.id))
            .all()
            )
    analysislist = [a.name for a in analyses]
    analysis_ids = [a.id for a in analyses]

    batchids = [
            i[0] for i in
            session.query(Batches.batch)
            .distinct()
            #.filter(or_(
            #        int(user.sec_lvl) == 9,
            #        Batches.public == '1',
            #        Batches.labgroup.ilike('%{0}%'.format(user.labgroup)),
            #        Batches.username == user.username,
            #        ))
            .all()
            ]
    batchnames = []
    for i in batchids:
        name = (
                session.query(sBatch.name)
                .filter(sBatch.id == i)
                .first()
                )
        if not name:
            batchnames.append('')
        else:
            batchnames.append(name.name)
    batchlist = [
            (batch + ': ' + batchnames[i])
            for i, batch in enumerate(batchids)
            ]
    batchlist.sort()

    # Default settings for the results display.
    # TODO: let users choose their defaults and save them for later
    #       sessions. cols are in addition to cellid, modelname and
    #       batch, which are set up to be required.
    defaultcols = n_ui.cols
    defaultrowlimit = n_ui.rowlimit
    defaultsort = n_ui.sort
    measurelist = n_ui.measurelist
    statuslist = [
            i[0] for i in
            session.query(Analysis.status)
            .filter(Analysis.name.in_(analysislist))
            .distinct().all()
            ]

    # Separate tags into a list of lists of strings.
    tags = [
            i[0].split(",") for i in
            session.query(Analysis.tags)
            .filter(Analysis.name.in_(analysislist))
            .distinct().all()
            ]
    # Flatten the list of lists into a single list of all tag strings
    # and remove leading and trailing whitespace.
    taglistbldupspc = [i for sublist in tags for i in sublist]
    taglistbldup = [t.strip() for t in taglistbldupspc]
    # Re-form the list with only unique tags.
    taglistbl = list(set(taglistbldup))
    # Finally, remove any blank tags and sort the list.
    taglist = [t for t in taglistbl if t != '']
    taglist.sort()

    # Returns all columns in the format 'Results.columnName', then
    # removes the leading 'Results.' from each string.
    collist = ['%s' % s for s in Results.__table__.columns]
    collist = [s.replace('Results.', '') for s in collist]
    sortlist = copy.deepcopy(collist)
    # Remove cellid and modelname from the options toggles --
    # make them required.
    required_cols = n_ui.required_cols
    for col in required_cols:
        collist.remove(col)

    # Imported at top from PlotGenerator.
    plotTypeList = PLOT_TYPES
    # Imported at top from nems_web.run_scripts.script_utils.
    scriptList = scan_for_scripts()
    session.close()

    return render_template(
            'main.html', analysislist=analysislist,
            analysis_ids=analysis_ids, batchlist=batchlist,
            collist=collist, defaultcols=defaultcols,
            measurelist=measurelist, defaultrowlimit=defaultrowlimit,
            sortlist=sortlist, defaultsort=defaultsort,
            statuslist=statuslist, taglist=taglist,
            plotTypeList=plotTypeList, username=user.username,
            seclvl=int(user.sec_lvl), iso=n_ui.iso, snr=n_ui.snr,
            snri=n_ui.snri, scripts=scriptList,
            bokeh_version=bokeh_version
            )