def make_all_static_models():
    """Write static models for all models in the database."""
    # Wipe any previously generated static models; ignore a missing dir.
    try:
        shutil.rmtree(static_dir)
    except OSError:
        pass
    # Recreate the output tree; ignore "already exists".
    try:
        makedirs(join(static_dir, 'raw'))
    except OSError:
        pass

    polisher_path = autodetect_model_polisher()

    session = Session()
    model_ids = [row[0] for row in session.query(Model.bigg_id)]
    failed_models = []
    for model_id in model_ids:
        # In DEBUG mode only the small test model is dumped.
        if DEBUG and model_id != 'e_coli_core':
            continue
        print('------------------------------------------------------------\n'
              'Dumping model %s' % model_id)
        # Record models that could not be written so the caller can report them.
        if not write_static_model(model_id, polisher_path):
            failed_models.append(model_id)
    session.close()

    if failed_models:
        return "Failed for models " + " ".join(failed_models)
def get(self, compartment_bigg_id):
    """Return details for a single compartment.

    Parameters
    ----------
    compartment_bigg_id: The BiGG ID of the compartment (from the URL).

    Responds with ``{'bigg_id': ..., 'name': ...}``, or raises an
    HTTPError 404 when the compartment does not exist.
    """
    session = Session()
    result_db = (session
                 .query(Compartment)
                 .filter(Compartment.bigg_id == compartment_bigg_id)
                 .first())
    session.close()
    # .first() returns None when no row matches; without this check the
    # attribute access below raised AttributeError (an HTTP 500) instead
    # of a proper 404.
    if result_db is None:
        raise HTTPError(status_code=404,
                        reason='Could not find compartment %s' % compartment_bigg_id)
    result = {'bigg_id': result_db.bigg_id, 'name': result_db.name}
    self.return_result(result)
def get(self):
    """Look up a reaction by the hash of its metabolite stoichiometry.

    Query arguments are metabolite identifiers mapped to stoichiometric
    coefficients; they are hashed with hash_metabolite_dictionary and
    matched against stored reaction hashes. Responds with a results
    list that is empty when no reaction matches.
    """
    metabolite_dict = {k: float(v[0]) for k, v in
                       self.request.query_arguments.iteritems()}
    # Renamed from `hash`, which shadowed the builtin hash().
    met_hash = hash_metabolite_dictionary(metabolite_dict)
    session = Session()
    try:
        results = {'results': [queries.reaction_with_hash(met_hash, session)],
                   'results_count': 1}
    except NotFoundError:
        # No reaction with this stoichiometry: report an empty result set.
        results = {'results': [], 'results_count': 0}
    session.close()
    self.write(results)
    self.finish()
def safe_query(func, *args, **kwargs):
    """Run *func* inside a fresh database session, translating errors to HTTP.

    A new Session is opened and passed to *func* as the ``session`` keyword
    argument; it is always closed afterwards. A ``queries.NotFoundError``
    becomes an HTTPError 404 and a ``ValueError`` becomes an HTTPError 400.

    Arguments
    ---------
    func: The function to run. *args and **kwargs are passed to this function.
    """
    session = Session()
    kwargs["session"] = session
    try:
        return func(*args, **kwargs)
    except (queries.NotFoundError, ValueError) as err:
        # NotFoundError is checked first, matching the original except order.
        status = 404 if isinstance(err, queries.NotFoundError) else 400
        raise HTTPError(status_code=status, reason=err.message)
    finally:
        session.close()
def post(self):
    """Handle the advanced-search form and render the combined results."""
    raw_query = self.get_argument('query', '')
    # NOTE: the emptiness filter applies to the un-stripped piece, so a
    # whitespace-only piece still yields an empty query string.
    query_strings = [piece.strip() for piece in raw_query.split(',')
                     if piece != '']

    session = Session()

    def checkbox_arg(name):
        # A checked HTML checkbox is submitted as 'on'.
        return self.get_argument(name, None) == 'on'

    # Restrict the search to the models whose checkboxes were ticked.
    model_list = [model for model in queries.get_model_list(session)
                  if checkbox_arg(model)]
    include_metabolites = checkbox_arg('include_metabolites')
    include_reactions = checkbox_arg('include_reactions')
    include_genes = checkbox_arg('include_genes')

    metabolite_results = []
    reaction_results = []
    gene_results = []
    for query_string in query_strings:
        if include_genes:
            gene_results += queries.search_for_genes(
                query_string, session, limit_models=model_list)
        if include_reactions:
            reaction_results += queries.search_for_reactions(
                query_string, session, limit_models=model_list)
        if include_metabolites:
            metabolite_results += queries.search_for_metabolites(
                query_string, session, limit_models=model_list)

    result = {'results': {'reactions': reaction_results,
                          'metabolites': metabolite_results,
                          'genes': gene_results},
              'no_pager': True}
    session.close()

    template = env.get_template("list_display.html")
    self.write(template.render(result))
    self.finish()
def post(self):
    """Search by an external database identifier and render the matches."""
    query_string = self.get_argument('query', '')
    database_source = self.get_argument('database_source', '')

    # One query per entity type, all against the same session.
    session = Session()
    metabolites = queries.get_metabolites_for_database_id(
        session, query_string, database_source)
    reactions = queries.get_reactions_for_database_id(
        session, query_string, database_source)
    genes = queries.get_genes_for_database_id(
        session, query_string, database_source)
    session.close()

    dictionary = {'results': {'metabolites': metabolites,
                              'reactions': reactions,
                              'genes': genes},
                  'no_pager': True,
                  'hide_organism': True}

    template = env.get_template("list_display.html")
    self.write(template.render(dictionary))
    self.finish()
def dump_model(bigg_id):
    """Build a COBRApy Model from the database contents for `bigg_id`.

    Loads the genes, reactions (with duplicate ModelReactions renamed via
    '-copyN' suffixes), metabolites, compartments, and the stoichiometric
    matrix, and returns the assembled cobra.core.Model.

    Raises Exception when no Model row with this bigg_id exists.
    """
    session = Session()

    # find the model
    model_db = (session
                .query(Model)
                .filter(Model.bigg_id == bigg_id)
                .first())
    if model_db is None:
        session.commit()
        session.close()
        raise Exception('Could not find model %s' % bigg_id)
    model = cobra.core.Model(bigg_id)
    # COBRApy uses the description as the ID sometimes. See
    # https://github.com/opencobra/cobrapy/pull/152
    model.description = bigg_id

    # genes
    logging.debug('Dumping genes')
    gene_names = (session
                  .query(Gene.bigg_id, Gene.name)
                  .join(ModelGene)
                  .filter(ModelGene.model_id == model_db.id)
                  .order_by(Gene.bigg_id)
                  )
    for gene_id, gene_name in gene_names:
        gene = cobra.core.Gene(gene_id)
        gene.name = gene_name
        model.genes.append(gene)

    # reactions
    logging.debug('Dumping reactions')
    # NOTE(review): the OldIDSynonym join appears to attach the original
    # (pre-normalization) reaction ID for each ModelReaction — confirm
    # against the schema; a ModelReaction with several synonyms would be
    # emitted once per synonym here.
    reactions_db = (session
                    .query(Reaction, ModelReaction, Synonym)
                    .join(ModelReaction)
                    .join(OldIDSynonym, OldIDSynonym.ome_id == ModelReaction.id)
                    .join(Synonym, Synonym.id == OldIDSynonym.synonym_id)
                    .filter(ModelReaction.model_id == model_db.id)
                    .order_by(Reaction.bigg_id)
                    )
    # make dictionaries and cast results
    result_dicts = []
    for r_db, mr_db, synonym_db in reactions_db:
        d = {}
        d['bigg_id'] = r_db.bigg_id
        d['name'] = r_db.name
        d['gene_reaction_rule'] = mr_db.gene_reaction_rule
        # Bounds/coefficients come back from the DB as Decimal/strings;
        # COBRApy expects floats.
        d['lower_bound'] = float(mr_db.lower_bound)
        d['upper_bound'] = float(mr_db.upper_bound)
        d['objective_coefficient'] = float(mr_db.objective_coefficient)
        d['original_bigg_id'] = synonym_db.synonym
        result_dicts.append(d)

    def filter_duplicates(result_dicts):
        """Find the reactions with multiple ModelReactions and increment names."""
        tups_by_bigg_id = defaultdict(list)
        # for each ModelReaction
        for d in result_dicts:
            # add to duplicates
            tups_by_bigg_id[d['bigg_id']].append(d)
        # duplicates have multiple ModelReactions
        duplicates = {k: v for k, v in tups_by_bigg_id.iteritems() if len(v) > 1}
        for bigg_id, dup_dicts in duplicates.iteritems():
            # add -copy1, copy2, etc. to the bigg ids for the duplicates
            last = bigg_id
            for d in dup_dicts:
                last = increment_id(last, 'copy')
                d['bigg_id'] = last
        return result_dicts

    # fix duplicates
    result_filtered = filter_duplicates(result_dicts)

    reactions = []
    for result_dict in result_filtered:
        r = cobra.core.Reaction(result_dict['bigg_id'])
        r.name = result_dict['name']
        r.gene_reaction_rule = result_dict['gene_reaction_rule']
        r.lower_bound = result_dict['lower_bound']
        r.upper_bound = result_dict['upper_bound']
        r.objective_coefficient = result_dict['objective_coefficient']
        # Keep the pre-normalization ID in the notes for round-tripping.
        r.notes = {'original_bigg_id': result_dict['original_bigg_id']}
        reactions.append(r)
    model.add_reactions(reactions)

    # metabolites
    logging.debug('Dumping metabolites')
    metabolites_db = \
        (session
         .query(Component.bigg_id, Compartment.bigg_id, Component.name)
         .join(CompartmentalizedComponent)
         .join(Compartment)
         .join(ModelCompartmentalizedComponent)
         .filter(ModelCompartmentalizedComponent.model_id == model_db.id)
         .order_by(Component.bigg_id)
         )
    metabolites = []
    compartments = set()
    for component_id, compartment_id, component_name in metabolites_db:
        # Skip rows with missing pieces; metabolite IDs are assembled as
        # '<component>_<compartment>' throughout this function.
        if component_id is not None and compartment_id is not None:
            m = cobra.core.Metabolite(
                id=component_id + '_' + compartment_id,
                compartment=compartment_id)
            m.name = component_name
            compartments.add(compartment_id)
            metabolites.append(m)
    model.add_metabolites(metabolites)

    # compartments: only those actually used by this model's metabolites
    compartment_db = (session.query(Compartment)
                      .filter(Compartment.bigg_id.in_(compartments)))
    model.compartments = {i.bigg_id: i.name for i in compartment_db}

    # reaction matrix
    logging.debug('Dumping reaction matrix')
    matrix_db = (session
                 .query(ReactionMatrix.stoichiometry, Reaction.bigg_id,
                        Component.bigg_id, Compartment.bigg_id)
                 # component, compartment
                 .join(CompartmentalizedComponent)
                 .join(Component)
                 .join(Compartment)
                 # reaction
                 .join(Reaction)
                 .join(ModelReaction)
                 .filter(ModelReaction.model_id == model_db.id)
                 .distinct())  # make sure we don't duplicate

    # load metabolites
    for stoich, reaction_id, component_id, compartment_id in matrix_db:
        try:
            m = model.metabolites.get_by_id(component_id + '_' + compartment_id)
        except KeyError:
            logging.warn('Metabolite not found %s in compartment %s for reaction %s' % \
                         (component_id, compartment_id, reaction_id))
            continue
        # add to reactions
        if reaction_id in model.reactions:
            # check again that we don't duplicate
            r = model.reactions.get_by_id(reaction_id)
            if m not in r.metabolites:
                r.add_metabolites({m: float(stoich)})
        else:
            # try incremented ids: duplicates were renamed '-copy1',
            # '-copy2', ... by filter_duplicates above, so walk every copy
            # until the ID no longer exists.
            while True:
                reaction_id = increment_id(reaction_id, 'copy')
                try:
                    # check again that we don't duplicate
                    r = model.reactions.get_by_id(reaction_id)
                    if m not in r.metabolites:
                        r.add_metabolites({m: float(stoich)})
                except KeyError:
                    break

    session.commit()
    session.close()

    return model
def get(self):
    """Search API endpoint: dispatch on ``search_type`` and write JSON results.

    Query arguments
    ---------------
    query: The search string (required).
    page, size: Optional paging parameters, passed through to the queries.
    search_type: One of 'reactions', 'metabolites', 'genes', 'models'.
    include_link_urls: If present, attach link_urls to each result row.
    columns: Optional column spec used to resolve the sort column/direction.

    Raises HTTPError 400 for an unrecognized search_type.
    """
    # get arguments
    query_string = self.get_argument("query")
    page = self.get_argument('page', None)
    size = self.get_argument('size', None)
    search_type = self.get_argument('search_type', None)
    include_link_urls = "include_link_urls" in self.request.query_arguments

    # defaults
    sort_column = None
    sort_direction = 'ascending'

    # get the sorting column
    columns = _parse_col_arg(self.get_argument('columns', None))
    sort_column, sort_direction = _get_col_name(self.request.query_arguments, columns,
                                                sort_column, sort_direction)

    # run the queries
    session = Session()
    result = None

    if search_type == 'reactions':
        # Universal reactions: results are labeled with the pseudo-model
        # 'Universal' and no organism.
        raw_results = queries.search_for_universal_reactions(query_string, session, page,
                                                             size, sort_column,
                                                             sort_direction)
        if include_link_urls:
            raw_results = [dict(x, link_urls={'bigg_id': '/universal/reactions/{bigg_id}'.format(**x)})
                           for x in raw_results]
        result = {'results': [dict(x, model_bigg_id='Universal', organism='')
                              for x in raw_results],
                  'results_count': queries.search_for_universal_reactions_count(query_string,
                                                                                session)}
    elif search_type == 'metabolites':
        raw_results = queries.search_for_universal_metabolites(query_string, session,
                                                               page, size, sort_column,
                                                               sort_direction)
        if include_link_urls:
            raw_results = [dict(x, link_urls={'bigg_id': '/universal/metabolites/{bigg_id}'.format(**x)})
                           for x in raw_results]
        result = {'results': [dict(x, model_bigg_id='Universal', organism='')
                              for x in raw_results],
                  'results_count': queries.search_for_universal_metabolites_count(query_string,
                                                                                  session)}
    elif search_type == 'genes':
        raw_results = queries.search_for_genes(query_string, session, page, size,
                                               sort_column, sort_direction)
        if include_link_urls:
            raw_results = [dict(x, link_urls={'bigg_id': '/models/{model_bigg_id}/genes/{bigg_id}'.format(**x)})
                           for x in raw_results]
        result = {'results': raw_results,
                  'results_count': queries.search_for_genes_count(query_string, session)}
    elif search_type == 'models':
        raw_results = queries.search_for_models(query_string, session, page, size,
                                                sort_column, sort_direction)
        if include_link_urls:
            raw_results = [dict(x, link_urls={'bigg_id': '/models/{bigg_id}'.format(**x),
                                              'metabolite_count': '/models/{bigg_id}/metabolites'.format(**x),
                                              'reaction_count': '/models/{bigg_id}/reactions'.format(**x),
                                              'gene_count': '/models/{bigg_id}/genes'.format(**x)})
                           for x in raw_results]
        result = {'results': raw_results,
                  'results_count': queries.search_for_models_count(query_string, session)}
    else:
        # Close the session before bailing out; previously the session
        # leaked on every request with a bad search_type.
        session.close()
        raise HTTPError(400, 'Bad search_type %s' % search_type)

    session.close()
    self.write(result)
    self.finish()
def get(self):
    """Return every compartment as a list of {'bigg_id', 'name'} dicts."""
    session = Session()
    rows = session.query(Compartment.bigg_id, Compartment.name)
    # Unpack each (bigg_id, name) row directly instead of indexing.
    results = [{'bigg_id': bigg_id, 'name': name} for bigg_id, name in rows]
    session.close()
    self.return_result(results)