def modelSumLabelUpdate(modelSum, labels, data, problem, txn):
    """Recompute the label error for the stored model described by modelSum.

    Loads the model identified by (user, hub, track, problem, penalty) within
    the given transaction and re-evaluates it against the current labels.
    Returns whatever calculateModelLabelError produces for that model.
    """
    penalty = modelSum['penalty']
    storedModel = db.Model(
        data['user'],
        data['hub'],
        data['track'],
        problem['chrom'],
        problem['chromStart'],
        penalty,
    ).get(txn=txn)
    return calculateModelLabelError(storedModel, labels, problem, penalty)
def getModels(data):
    """Return peak records for the best available model of each problem.

    For every problem overlapping the request, pick the model summary that
    survives all quality filters (see _usableModelSummaries). If none does,
    fall back to generating a LOPART model for that problem. The chosen
    model is clipped to the requested bounds and only its 'peak' rows are
    returned, renamed to the JBrowse column convention.

    The original version repeated the LOPART-fallback sequence four times,
    once per filter stage; the filtering is now factored into a helper so
    the fallback appears exactly once.
    """
    problems = Tracks.getProblems(data)
    output = []
    for problem in problems:
        modelSummaries = db.ModelSummaries(data['user'], data['hub'], data['track'],
                                           problem['chrom'], problem['chromStart']).get()

        noError = _usableModelSummaries(modelSummaries)
        if noError is None:
            # No stored model passes the quality filters: generate a LOPART
            # model for this problem instead.
            lopartOutput = generateLOPARTModel(data, problem)
            output.extend(lopartOutput)
            continue

        if len(noError.index) > 1:
            # Select which model to display from modelSums with 0 error
            noError = whichModelToDisplay(data, problem, noError)

        penalty = noError['penalty'].iloc[0]
        minErrorModel = db.Model(data['user'], data['hub'], data['track'],
                                 problem['chrom'], problem['chromStart'], penalty)
        model = minErrorModel.getInBounds(data['ref'], data['start'], data['end'])

        onlyPeaks = model[model['annotation'] == 'peak']
        # Organize the columns
        onlyPeaks = onlyPeaks[modelColumns]
        onlyPeaks.columns = jbrowseModelColumns
        output.extend(onlyPeaks.to_dict('records'))
    return output


def _usableModelSummaries(modelSummaries):
    """Apply the model-quality filter cascade to a summaries DataFrame.

    Stages (in order): summaries exist, cover at least one labeled region,
    predict at least one peak, and have zero label errors. Returns the rows
    surviving every stage, or None as soon as any stage leaves no rows.
    """
    if len(modelSummaries.index) < 1:
        return None
    nonZeroRegions = modelSummaries[modelSummaries['regions'] > 0]
    if len(nonZeroRegions.index) < 1:
        return None
    withPeaks = nonZeroRegions[nonZeroRegions['numPeaks'] > 0]
    if len(withPeaks.index) < 1:
        return None
    noError = withPeaks[withPeaks['errors'] < 1]
    if len(noError.index) < 1:
        return None
    return noError
def putModel(data):
    """Persist a submitted model, score it against current labels, and
    record the resulting error summary.

    Expects data to carry 'modelData' (JSON-encoded model rows), 'penalty',
    and 'modelInfo' (user/hub/track plus the target problem). Returns the
    modelInfo dict unchanged so the caller can echo it back.
    """
    modelInfo = data['modelInfo']
    problem = modelInfo['problem']
    penalty = data['penalty']
    user = modelInfo['user']
    hub = modelInfo['hub']
    track = modelInfo['track']

    modelData = pd.read_json(data['modelData'])
    modelData.columns = modelColumns

    txn = db.getTxn()
    db.Model(user, hub, track, problem['chrom'],
             problem['chromStart'], penalty).put(modelData, txn=txn)
    labels = db.Labels(user, hub, track, problem['chrom']).get()
    errorSum = calculateModelLabelError(modelData, labels, problem, penalty)
    # NOTE(review): if anything above raises after db.getTxn(), the
    # transaction is neither committed nor aborted here — confirm whether
    # db.getTxn provides its own cleanup, otherwise this can leak a txn.
    db.ModelSummaries(user, hub, track, problem['chrom'],
                      problem['chromStart']).add(errorSum, txn=txn)
    txn.commit()
    return modelInfo