def _model_from_stream(stream, filename):
    if filename.endswith(".gz"):
        with GzipFile(fileobj=stream) as file_handle:
            model = read_sbml_model(file_handle)
    else:
        model = read_sbml_model(stream)
    return model
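# A minimal usage sketch for _model_from_stream above (not part of the original
# source): the caller opens the upload in binary mode and passes both the raw
# stream and the original filename so gzip detection works. The path is
# hypothetical.
if __name__ == "__main__":
    with open("e_coli_core.xml.gz", "rb") as stream:
        model = _model_from_stream(stream, "e_coli_core.xml.gz")
    print(len(model.reactions))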
def models():
    """
    Preload the storage module with test models.

    This fixture ensures models are loaded locally, and only once per test
    session. The returned dict contains a map of model identifier to the
    corresponding numeric id in the storage module.

    For endpoint tests, use the returned dict to access the numeric id for a
    given model id. For unit tests, consider using the function-scoped
    fixtures below to be able to make revertable modifications to the models.
    """
    model_keys = {"e_coli_core": 1, "e_coli_core_proprietary": 2, "iJO1366": 3}
    model = read_sbml_model("tests/data/e_coli_core.sbml.gz")
    storage._MODELS[model_keys["e_coli_core"]] = storage.ModelWrapper(
        model, None, "Escherichia coli", "BIOMASS_Ecoli_core_w_GAM")
    model = read_sbml_model("tests/data/e_coli_core.sbml.gz")
    storage._MODELS[
        model_keys["e_coli_core_proprietary"]] = storage.ModelWrapper(
            model, 1, "Escherichia coli", "BIOMASS_Ecoli_core_w_GAM")
    model = read_sbml_model("tests/data/iJO1366.sbml.gz")
    storage._MODELS[model_keys["iJO1366"]] = storage.ModelWrapper(
        model, None, "Escherichia coli", "BIOMASS_Ec_iJO1366_core_53p95M")
    return model_keys
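# Hedged usage sketch (not from the original source): how an endpoint test
# might consume the dict returned by the models() fixture above. The route and
# the client fixture are assumptions; the fixture is assumed to be registered
# with pytest.
def test_model_endpoint(client, models):
    # Map the human-readable model id to the numeric id used by the storage module.
    numeric_id = models["e_coli_core"]
    response = client.get(f"/models/{numeric_id}")
    assert response.status_code == 200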
def to_json(
    directory,
    models_directory,
    icc389=None,
    icc431=None,
    icc470=None,
    icc651=None,
):
    # Convert each provided SBML model to JSON alongside the original file.
    for sbml_name in (icc389, icc431, icc470, icc651):
        if sbml_name:
            model = read_sbml_model(join(directory, models_directory, sbml_name))
            save_json_model(model, join(directory, models_directory, f'{model.id}.json'))
def test_stable_gprs(data_directory, tmp_path):
    mini = read_sbml_model(join(data_directory, "mini_fbc2.xml"))
    mini.reactions.GLCpts.gene_reaction_rule = "((b2415 and b2417)or (b2416))"

    fixed = join(str(tmp_path), "fixed_gpr.xml")
    write_sbml_model(mini, fixed)
    fixed_model = read_sbml_model(fixed)
    assert (fixed_model.reactions.GLCpts.gene_reaction_rule
            == "(b2415 and b2417) or b2416")
def test_from_sbml_string(data_directory):
    """Test reading from SBML string."""
    sbml_path = join(data_directory, "mini_fbc2.xml")
    with open(sbml_path, "r") as f_in:
        sbml_str = f_in.read()
    model1 = read_sbml_model(sbml_str)

    model2 = read_sbml_model(sbml_path)
    TestCobraIO.compare_models(name="read from string",
                               model1=model1, model2=model2)
def raise_libsbml_errors():
    with pytest.raises(ImportError):
        io.read_sbml_model('test')
    with pytest.raises(ImportError):
        io.write_sbml_model(None, 'test')
    with pytest.raises(ImportError):
        io.load_matlab_model('test')
    with pytest.raises(ImportError):
        io.write_legacy_sbml(None, 'test')
    with pytest.raises(ImportError):
        io.read_legacy_sbml(None, 'test')
def test_read_write_sbml_annotations(data_directory, tmp_path):
    """Test reading and writing annotations."""
    with open(join(data_directory, "annotation.xml"), "r") as f_in:
        model1 = read_sbml_model(f_in)

    sbml_path = join(str(tmp_path), "test.xml")
    with open(sbml_path, "w") as f_out:
        write_sbml_model(model1, f_out)

    with open(sbml_path, "r") as f_in:
        model2 = read_sbml_model(f_in)

    _check_sbml_annotations(model2)
def test_filehandle(data_directory, tmp_path):
    """Test reading and writing to file handle."""
    with open(join(data_directory, "mini_fbc2.xml"), "r") as f_in:
        model1 = read_sbml_model(f_in)
    assert model1 is not None

    sbml_path = join(str(tmp_path), "test.xml")
    with open(sbml_path, "w") as f_out:
        write_sbml_model(model1, f_out)

    with open(sbml_path, "r") as f_in:
        model2 = read_sbml_model(f_in)

    TestCobraIO.compare_models(name="filehandle", model1=model1, model2=model2)
def constraints(request):
    if request.method == "POST":
        handle_upload_file(request.FILES['file'], str(request.FILES['file']))
        filename = request.FILES['file'].name
        # Each supported LP file sets the corresponding reaction as the
        # objective of the Recon2_2 model.
        objectives = {
            "3HAO.lp": "3HAO",
            "2AMADPTm.lp": "2AMADPTm",
            "2HATVLACthc.lp": "2HATVLACthc",
        }
        if filename in objectives:
            gurobi = gurobipy.read(
                "/Users/mihaoyang/Desktop/BEProject7/Recon/File/" + filename)
            with Capturing() as output:
                gurobi.optimize()
            model = io.read_sbml_model(
                "/Users/mihaoyang/Desktop/BEProject7/Recon/File/Recon2_2.xml")
            model.objective = objectives[filename]
            with Capturing() as output1:
                model.summary()
            model.optimize().objective_value
            context = dict(a1=output, a2=output1)
            return render(request, 'Recon/set_constraints.html', context)
    return render(request, 'Recon/set_constraints.html')
def create_test_model(test_pickle=salmonella_pickle):
    """Returns a cobra model for testing.  The default model is the up to date
    version of the Salmonella enterica Typhimurium LT2 model published in
    Thiele et al. 2011 BMC Sys Bio 5:8

    test_pickle: The complete file name of a pickled cobra.Model or SBML XML
    file to be read.  We currently provide Salmonella enterica Typhimurium and
    Escherichia coli models whose paths are stored in
    cobra.test.salmonella_pickle and cobra.test.ecoli_pickle, respectively.
    The ecoli model is a variant of the model published in Orth et al. 2011
    Mol Syst Biol 7:535
    """
    from os import name as __name
    try:
        from cPickle import load
    except:
        from pickle import load
    try:
        with open(test_pickle, "rb") as infile:
            model = load(infile)
    except:
        # if the pickle can't be loaded then load the sbml xml
        from warnings import warn
        warn("Couldn't load %s. Loading the default model %s instead"
             % (test_pickle, salmonella_sbml))
        sys.path.insert(0, cobra_location)
        from cobra.io import read_sbml_model
        model = read_sbml_model(salmonella_sbml)
        sys.path.pop(0)
    return model
def test_notes(tmp_path):
    """Testing if model notes are written in SBML"""
    path_to_file = join(str(tmp_path), "model_notes.xml")

    # making a minimal cobra model to test notes
    model = cobra.Model("e_coli_core")
    model.notes["Remark"] = "...Model Notes..."
    met = cobra.Metabolite("pyr_c", compartment="c")
    model.add_metabolites([met])
    met.notes["Remark"] = "Note with \n newline"
    rxn = cobra.Reaction("R_ATPM")
    model.add_reactions([rxn])
    rxn.notes["Remark"] = "What about me?"
    model.objective_direction = "max"
    model.objective = rxn
    write_sbml_model(model, path_to_file)

    # reading the model back
    model_after_reading = read_sbml_model(path_to_file)
    met_after_reading = model_after_reading.metabolites.get_by_id("pyr_c")
    reaction_after_reading = model_after_reading.reactions.get_by_id("R_ATPM")

    # checking if notes are written to model
    assert model_after_reading.notes["Remark"] == "...Model Notes..."

    # checking notes for metabolite and reaction
    assert met_after_reading.notes["Remark"] == "Note with \n newline"
    assert reaction_after_reading.notes["Remark"] == "What about me?"
def test_boundary_conditions(data_directory):
    """Test infinity bound example."""
    sbml_path1 = join(data_directory, "fbc_ex1.xml")
    model1 = read_sbml_model(sbml_path1)
    sol1 = model1.optimize()

    # model with species boundaryCondition==True
    sbml_path2 = join(data_directory, "fbc_ex2.xml")
    model2 = read_sbml_model(sbml_path2)
    sol2 = model2.optimize()
    r = model2.reactions.get_by_id("EX_X")
    assert r.lower_bound == -float("Inf")
    assert r.upper_bound == float("Inf")

    assert sol1.objective_value == sol2.objective_value
def report(ref_model_path, library_folder, outfolder):
    ref_model = read_sbml_model(ref_model_path)
    table, reactions_matrix, metabolite_matrix, gene_matrix = Summary.report_make_table(
        library_folder, ref_model)
    table, R_pw_diff, M_pw_diff, G_pw_diff = Summary.report_clustering_plots(
        table, reactions_matrix, metabolite_matrix, gene_matrix, outfolder)
    '''
    R_pw_sim = Summary.PW_similarity(reactions_matrix)
    M_pw_sim = Summary.PW_similarity(metabolite_matrix)
    G_pw_sim = Summary.PW_similarity(gene_matrix)
    R_PW = Summary.merge_PW_df(R_pw_sim, R_pw_diff)
    M_PW = Summary.merge_PW_df(M_pw_sim, M_pw_diff)
    G_PW = Summary.merge_PW_df(G_pw_sim, G_pw_diff)
    Summary.heat(R_PW, outfolder + '/reactions_PW.png')
    Summary.heat(M_PW, outfolder + '/metabolites_PW.png')
    Summary.heat(G_PW, outfolder + '/genes_PW.png')
    '''
    Summary.table_as_png(table, outfolder + '/library_sumary.png')
    return table
def knockout_reactions_for_genes(model_path: Path, genes=None) -> Dict[str, List[str]]:
    """Calculate mapping of genes to affected reactions.

    Which reactions are knocked out by a given gene. A single gene knockout
    can affect multiple reactions. Uses GPR mappings.
    """
    model = read_sbml_model(str(model_path), f_replace={})  # type: cobra.core.Model
    if genes is None:
        genes = model.genes

    knockout_reactions = defaultdict(list)
    for reaction in model.reactions:  # type: cobra.core.Reaction
        gpr = reaction.gene_reaction_rule
        tree, gpr_genes = cobra.core.gene.parse_gpr(gpr)
        for gene in genes:  # type: cobra.core.Gene
            if gene.id not in gpr_genes:
                gene_essential = False
            else:
                # eval_gpr: True if the gene reaction rule is true with
                # the given knockouts otherwise false
                gene_essential = not cobra.core.gene.eval_gpr(
                    tree, knockouts={gene.id})
            if gene_essential:
                knockout_reactions[gene.id].append(reaction.id)

    # from pprint import pprint
    # pprint(knockout_reactions)

    return knockout_reactions
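# A small usage sketch for knockout_reactions_for_genes above (assumption, not
# from the original source): the SBML path is hypothetical. The call maps every
# gene in the model to the reactions its single knockout disables.
if __name__ == "__main__":
    from pathlib import Path

    mapping = knockout_reactions_for_genes(Path("e_coli_core.xml.gz"))
    for gene_id, reaction_ids in sorted(mapping.items()):
        print(gene_id, reaction_ids)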
def test_get_mw(self):
    '''Tests get_mw method.'''
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    model = read_sbml_model(
        os.path.join(curr_dir, '../../../data/models/yeastGEM.xml'))

    tests = 0
    while tests < 10:
        met = random.choice(model.metabolites)
        # met = model.metabolites.get_by_id('s_3071__91__lp__93__')
        print(met.id, met.name)

        # Get existing formula and mw:
        formula = met.formula
        if formula:
            existing_mw = get_molecular_mass(formula, r_mass=2**16)

            # Unset formula:
            met.formula = None
            calc_mw = get_mw(model, met.id)
            if calc_mw:
                self.assertAlmostEqual(existing_mw, calc_mw, 0)
                tests += 1

            # Reset formula:
            met.formula = formula
def test_model_history(tmp_path):
    """Testing reading and writing of ModelHistory."""
    model = Model("test")
    model._sbml = {
        "creators": [{
            "familyName": "Mustermann",
            "givenName": "Max",
            "organisation": "Muster University",
            "email": "*****@*****.**",
        }]
    }

    sbml_path = join(str(tmp_path), "test.xml")
    with open(sbml_path, "w") as f_out:
        write_sbml_model(model, f_out)

    with open(sbml_path, "r") as f_in:
        model2 = read_sbml_model(f_in)

    assert "creators" in model2._sbml
    # compare with "==", not "is": identity checks against int literals are unreliable
    assert len(model2._sbml["creators"]) == 1
    c = model2._sbml["creators"][0]
    assert c["familyName"] == "Mustermann"
    assert c["givenName"] == "Max"
    assert c["organisation"] == "Muster University"
    assert c["email"] == "*****@*****.**"
def _load_model_from_file(path, handle):
    """Try to parse a model from a file handle using different encodings."""
    logger.debug('Reading file from %s assuming pickled model.' % path)
    try:
        model = pickle.load(handle)
    except (TypeError, pickle.UnpicklingError):
        logger.debug('Cannot unpickle %s. Assuming json model next.' % path)
        try:
            model = load_json_model(path)
        except ValueError:
            logger.debug(
                "Cannot import %s as json model. Assuming sbml model next." % path)
            try:
                model = read_sbml_model(path)
            except AttributeError as e:
                logger.error(
                    "cobrapy doesn't raise a proper exception if a file "
                    "does not contain an SBML model")
                raise e
            except Exception as e:
                logger.error(
                    "Looks like something blew up while trying to import {} as an SBML model. "
                    "Try validating the model at http://sbml.org/Facilities/Validator/ "
                    "to get more information.".format(path))
                raise e
    return model
def test_sbml_read(self):
    ## with catch_warnings(record=True) as w:
    model = io.read_sbml_model(test_sbml_file)
    self.assertEqual(len(model.reactions), len(self.model.reactions))
    # make sure that an error is raised when given a nonexistent file
    self.assertRaises(IOError, io.read_sbml_model,
                      "fake_file_which_does_not_exist")
def load_model_from_file(filename):
    """
    Load a model from a file based on the extension of the file name.

    Parameters
    ----------
    filename : str
        Path to model file

    Returns
    -------
    cobra.core.Model
        Model object loaded from file

    Raises
    ------
    IOError
        If model file extension is not supported.
    """
    (root, ext) = splitext(filename)
    if ext == '.mat':
        model = load_matlab_model(filename)
    elif ext == '.xml' or ext == '.sbml':
        model = read_sbml_model(filename)
    elif ext == '.json':
        model = load_json_model(filename)
    else:
        raise IOError(
            'Model file extension not supported for {0}'.format(filename))
    return model
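# Minimal usage sketch for load_model_from_file above (not from the original
# source): the file name is a placeholder and must carry one of the supported
# extensions (.mat, .xml, .sbml, .json).
if __name__ == "__main__":
    model = load_model_from_file("example_model.json")  # hypothetical path
    print(model.id, len(model.reactions), len(model.metabolites))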
def create_cobra_model_from_agora_model(agora_name, validate=False):
    """
    Create a COBRA model from an AGORA model.

    Parameters
    ----------
    agora_name : str
        Name of AGORA model
    validate : bool, optional
        When True, perform validity checks on COBRA model

    Returns
    -------
    cobra.Model
        COBRA model created from SBML representation of AGORA model
    """
    # Download the SBML file.
    response = requests.get('{0}AGORA/sbml/{1}.xml'.format(vmh_url, agora_name))
    if response.status_code != requests.codes.OK:
        response.raise_for_status()

    # Convert to a cobra.Model object.
    with io.BytesIO(response.content) as f:
        model = read_sbml_model(f)

    # If requested, validate the COBRA model.
    if validate:
        warn('Coming soon')

    return model
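# Hedged usage sketch (not from the original source): fetch a single AGORA
# reconstruction by name and run FBA on it. The organism name below is
# illustrative and assumes the AGORA collection exposes a model of that name.
if __name__ == "__main__":
    model = create_cobra_model_from_agora_model("Bacteroides_caccae_ATCC_43185")
    solution = model.optimize()
    print(model.id, solution.objective_value)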
def test_build_minimal_medium(self):
    # Load regular data
    cobra_model = None
    with NamedTemporaryFile(dir=self.temp_d, delete=False) as tempf:
        self.rpsbml.write_to_file(tempf.name)
        cobra_model = cobra_io.read_sbml_model(tempf.name, use_fbc_package=True)
    cobra_solution = cobra_model.optimize()
    df1 = build_minimal_medium(
        model=cobra_model,
        solution=cobra_solution
    )
    df2 = build_minimal_medium(
        model=cobra_model
    )
    # Return values.
    self.assertIsInstance(
        df1,
        pd.DataFrame
    )
    # Values
    self.assertEqual(
        df2.shape[1],
        2
    )
    # Close.
    tempf.close()
    remove(tempf.name)
def test_convert_ids(test_model):
    model_in = read_sbml_model(test_model[0]['path'])
    model_in.id = 'A bad id'
    model_in.description = 'throw away'
    model_in.add_reaction(Reaction('DADA'))
    model_in.reactions.get_by_id('DADA').add_metabolites({
        Metabolite('dad_DASH_2_c'): -1
    })

    returned, old_ids = convert_ids(model_in.copy())

    assert returned.id == 'A_bad_id'
    assert returned.description == 'A_bad_id'
    assert 'dad_2_c' in returned.metabolites
    assert 'dad_2_c' in [x.id for x in
                         returned.reactions.get_by_id('DADA').metabolites]
    assert ('dad_2_c', 'dad_DASH_2_c') in old_ids['metabolites'].items()
    assert ('EX_gln__L_e', 'EX_gln_L_e') in old_ids['reactions'].items()

    # genes
    assert 'gene_with_period_AT22' in [x.id for x in returned.genes]
    assert (returned.reactions.get_by_id('FRD7').gene_reaction_rule ==
            model_in.reactions.get_by_id('FRD7').gene_reaction_rule
            .replace('.22', '_AT22').replace('.12', '_AT12'))
    assert old_ids['genes']['gene_with_period_AT22'] == 'gene_with_period.22'
    assert len(returned.genes) == len(model_in.genes)
    # all() is needed here; asserting a non-empty list comprehension is always true
    assert all('.22' not in x.id for x in returned.genes)
    assert all('.22' not in x.gene_reaction_rule for x in returned.reactions)
def test_boundary_conditions(data_directory):
    """Test infinity bound example."""
    sbml_path1 = join(data_directory, "fbc_ex1.xml")
    model1 = read_sbml_model(sbml_path1)
    sol1 = model1.optimize()

    # model with species boundaryCondition==True
    sbml_path2 = join(data_directory, "fbc_ex2.xml")
    model2 = read_sbml_model(sbml_path2)
    sol2 = model2.optimize()
    r = model2.reactions.get_by_id("EX_X")
    assert r.lower_bound == config.lower_bound
    assert r.upper_bound == config.upper_bound

    assert sol1.objective_value == sol2.objective_value
def test_global_eff(self):
    # Load regular data
    expected_medium = {
        'EX_MNXM83': 2000.0,
        'EX_pi_e': 2000.0,
        'EX_fe3_e': 1000.0
    }
    specie_missing_id = 'MNXM83'
    medium = load_medium_file(os_path.join(self.medium_path, 'medium.fmt.c.csv'))
    medium = crossref_medium_id(
        df=medium,
        model=self.rpsbml,
        compartment_id='MNXC2'
    )
    exchange = self.rpsbml.build_exchange_reaction('c')
    df = merge_medium_exchange(
        medium=medium,
        exchange_reaction=exchange
    )
    rpsbml = add_missing_specie(
        self.rpsbml,
        df,
        'c'
    )
    cobra_model = None
    with NamedTemporaryFile(dir=self.temp_d, delete=False) as tempf:
        rpsbml.write_to_file(tempf.name)
        cobra_model = cobra_io.read_sbml_model(tempf.name, use_fbc_package=True)
    cobra_model.medium = df_to_medium(df)
    self.assertEqual(
        cobra_model.medium,
        expected_medium
    )
    tempf.close()
    remove(tempf.name)
def cameo_optim(isMultiProc, size):
    model = read_sbml_model(SBML_FILE)
    obj = biomass_product_coupled_yield(
        model.reactions.Ec_biomass_iAF1260_core_59p81M,
        model.reactions.EX_succ_e_,
        model.reactions.EX_glc_e_)
    ko = ReactionKnockoutOptimization(model=model,
                                      objective_function=obj,
                                      use_nullspace_simplification=False)
    if isMultiProc:
        res = ko.run(pop_size=100, max_generations=1, max_size=size,
                     crossover_rate=0.9, mutation_rate=0.1, indel_rate=0.185,
                     view=MultiprocessingView(processes=2))
    else:
        res = ko.run(pop_size=100, max_generations=1, max_size=size,
                     crossover_rate=0.9, mutation_rate=0.1, indel_rate=0.185)
    res.data_frame.to_csv(basePath + "Results/optim_Ec_iAF1260_ko_cameo.csv")
def test_2():
    """Example of evaluation of RECON1 GPRs"""
    from urllib.request import urlretrieve
    from cobra.io import read_sbml_model
    import random

    path, _ = urlretrieve('http://bigg.ucsd.edu/static/models/RECON1.xml')
    model = read_sbml_model(path)

    ogpr = model.reactions.ATPS4m.gene_name_reaction_rule
    gpr = ogpr
    print(gpr)

    t = build_tree(gpr, Boolean)
    print(t)

    genes = list(t.get_operands())
    print("GENES:\n", genes)

    print("Evaluations:")
    evaluator = BooleanEvaluator(genes)
    res = t.evaluate(evaluator.f_operand, evaluator.f_operator)
    print(evaluator.true_list, " ==> ", res)

    for _ in range(20):
        g = []
        n = random.randint(1, len(genes))
        for _ in range(n):
            i = random.randint(0, len(genes) - 1)
            g.append(genes[i])
        evaluator.set_true_list(g)
        res = t.evaluate(evaluator.f_operand, evaluator.f_operator)
        print(evaluator.true_list, " ==> ", res)
def test_is_boundary_type(self):
    # TODO: implement a test that does not rely on SBO terms, to see how
    # compartment_id etc. are managed
    # Load.
    rpsbml_ecoli = rpSBML(inFile=self.rpsbml_ecoli_path, logger=self.logger)
    reactions = rpsbml_ecoli.getModel().getListOfReactions()
    cobra_model = cobra_io.read_sbml_model(self.rpsbml_ecoli_path,
                                           use_fbc_package=True)
    # Return type.
    self.assertIsInstance(
        rpsbml_ecoli.is_boundary_type(reactions[0], 'exchange', ''), bool)
    # Exchange.
    rpsbml_exchange = [
        x for x in reactions
        if rpsbml_ecoli.is_boundary_type(x, 'exchange', 'e')
    ]
    self.assertEqual(len(cobra_model.exchanges), len(rpsbml_exchange))
    rpsbml_exchange = [
        x for x in reactions
        if rpsbml_ecoli.is_boundary_type(x, 'exchange', '')
    ]
    self.assertEqual(len(cobra_model.exchanges), len(rpsbml_exchange))
    # Demand.
    rpsbml_demands = [
        x for x in reactions
        if rpsbml_ecoli.is_boundary_type(x, 'demand', '')
    ]
    self.assertEqual(len(cobra_model.demands), len(rpsbml_demands))
    # Sinks.
    rpsbml_sinks = [
        x for x in reactions
        if rpsbml_ecoli.is_boundary_type(x, 'sink', '')
    ]
    self.assertEqual(len(cobra_model.sinks), len(rpsbml_sinks))
def _load_model(self, file_storage):
    try:
        filename, content = self._decompress(
            file_storage.filename.lower(), file_storage)
    except IOError as err:
        msg = "Failed to decompress file."
        LOGGER.exception(msg)
        api.abort(400, msg, error=str(err))
    try:
        if file_storage.mimetype in self.JSON_TYPES or \
                filename.endswith("json"):
            LOGGER.debug("Loading model from JSON.")
            model = load_json_model(content)
        elif file_storage.mimetype in self.XML_TYPES or \
                filename.endswith("xml") or filename.endswith("sbml"):
            LOGGER.debug("Loading model from SBML.")
            model = read_sbml_model(content)
        else:
            msg = f"'{file_storage.mimetype}' is an unhandled MIME type."
            LOGGER.error(msg)
            api.abort(415, msg, recognizedMIMETypes=list(chain(
                self.JSON_TYPES, self.XML_TYPES)))
    except (CobraSBMLError, ValueError) as err:
        msg = "Failed to parse model."
        LOGGER.exception(msg)
        api.abort(400, msg, error=str(err))
    finally:
        content.close()
        file_storage.close()
    return model
def _removeDeadEnd(sbml_path):
    cobraModel = cobra_io.read_sbml_model(sbml_path, use_fbc_package=True)
    cobraModel = _reduce_model(cobraModel)
    with TemporaryDirectory() as tmpOutputFolder:
        cobra_io.write_sbml_model(cobraModel,
                                  os_path.join(tmpOutputFolder, 'tmp.xml'))
        rpsbml = rpSBML(os_path.join(tmpOutputFolder, 'tmp.xml'))
        return rpsbml
def model(model=None):
    if model is None:
        path = os.path.join(os.path.dirname(__file__),
                            REPO_PATH + "/model/Sco-GEM.xml")
        print("Loading model {0}".format(path))
        return read_sbml_model(path)
    else:
        return model
def _summarize_models(args):
    tid, row, new_path = args
    files = row["file"].split("|")
    if len(files) > 1:
        mod = join_models(files, id=tid)
    else:
        mod = read_sbml_model(files[0])
    save_json_model(mod, new_path)
def model():
    path = os.path.join(os.path.dirname(__file__),
                        REPO_PATH + "/model/Sco-GEM.xml")
    model = read_sbml_model(path)
    try:
        model.solver = SOLVER
    except:
        pass
    return model
def _summarize_models(args):
    tid, row, new_path, folder = args
    files = [path.join(folder, r) for r in row["file"].split("|")]
    if len(files) > 1:
        mod = join_models(files, id=tid)
        save_json_model(mod, new_path)
    else:
        mod = read_sbml_model(path.join(folder, row["file"]))
        save_json_model(mod, new_path)
def get_model_from_uminho(query, index,
                          host="http://darwin.di.uminho.pt/models",
                          solver_interface=optlang, sanitize=True):
    model_index = index[index["name"] == query]['id'].values[0]
    sbml_file = get_sbml_file(model_index, host)
    sbml_file.close()
    model = read_sbml_model(sbml_file.name)
    model.solver = solver_interface
    if sanitize:
        sanitize_ids(model)
    return model
def test_groups(data_directory, tmp_path):
    """Testing reading and writing of groups"""
    sbml_path = join(data_directory, "e_coli_core.xml")
    model = read_sbml_model(sbml_path)
    assert model.groups is not None
    assert len(model.groups) == 10
    g1 = model.groups[0]
    assert len(g1.members) == 6

    temp_path = join(str(tmp_path), "test.xml")
    with open(temp_path, "w") as f_out:
        write_sbml_model(model, f_out)

    with open(temp_path, "r") as f_in:
        model2 = read_sbml_model(f_in)

    assert model2.groups is not None
    assert len(model2.groups) == 10
    g1 = model2.groups[0]
    assert len(g1.members) == 6
def test_infinity_bounds(data_directory, tmp_path):
    """Test infinity bound example."""
    sbml_path = join(data_directory, "fbc_ex1.xml")
    model = read_sbml_model(sbml_path)

    # check that simulation works
    solution = model.optimize()

    # check that values are set
    r = model.reactions.get_by_id("EX_X")
    assert r.lower_bound == -float("Inf")
    assert r.upper_bound == float("Inf")

    temp_path = join(str(tmp_path), "test.xml")
    with open(temp_path, "w") as f_out:
        write_sbml_model(model, f_out)

    with open(temp_path, "r") as f_in:
        model2 = read_sbml_model(f_in)

    r = model2.reactions.get_by_id("EX_X")
    assert r.lower_bound == -float("Inf")
    assert r.upper_bound == float("Inf")
def create_test_model(model_name="salmonella"):
    """Returns a cobra model for testing

    model_name: str
        One of 'ecoli', 'textbook', or 'salmonella', or the path to a pickled
        cobra.Model
    """
    if model_name == "ecoli":
        ecoli_sbml = join(data_dir, "iJO1366.xml")
        return read_sbml_model(ecoli_sbml)
    elif model_name == "textbook":
        textbook_sbml = join(data_dir, "textbook.xml.gz")
        return read_sbml_model(textbook_sbml)
    elif model_name == "mini":
        mini_sbml = join(data_dir, "mini_fbc2.xml")
        return read_sbml_model(mini_sbml)
    elif model_name == "salmonella":
        salmonella_pickle = join(data_dir, "salmonella.pickle")
        model_name = salmonella_pickle
    with open(model_name, "rb") as infile:
        return _load(infile)
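# Minimal usage sketch for create_test_model above (not part of the original
# file): load the bundled textbook model and run a quick optimization.
if __name__ == "__main__":
    model = create_test_model("textbook")
    print(model.optimize().objective_value)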
def test_model():
    """Gets a small test model.

    Returns a small test model for the central carbon metabolism.

    Args:
        None:

    Returns:
        cobra model: A model of the central carbon metabolism.
    """
    from os.path import split, join
    from cobra.io import read_sbml_model

    this_dir, _ = split(__file__)
    data_path = join(this_dir, "data", "cemet.xml")
    return read_sbml_model(data_path)
def load_cobra_model(path, notifications):
    """Load a COBRA model with meta information from an SBML document."""
    doc = libsbml.readSBML(path)
    fbc = doc.getPlugin("fbc")
    sbml_ver = doc.getLevel(), doc.getVersion(), \
        fbc if fbc is None else fbc.getVersion()
    with catch_warnings(record=True) as warnings:
        simplefilter("always")
        try:
            model = read_sbml_model(path)
        except Exception as err:
            notifications['errors'].append(str(err))
            model = None
            validate = True
        else:
            validate = False
    notifications['warnings'].extend([str(w.message) for w in warnings])
    if validate:
        run_sbml_validation(doc, notifications)
    return model, sbml_ver
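# Usage sketch for load_cobra_model above (assumed, not from the source): the
# caller provides a notifications dict with 'errors' and 'warnings' lists and
# inspects it after loading. The SBML path is hypothetical.
if __name__ == "__main__":
    notifications = {"errors": [], "warnings": []}
    model, sbml_ver = load_cobra_model("example_model.xml", notifications)
    print(sbml_ver, len(notifications["warnings"]), len(notifications["errors"]))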
try:
    from cPickle import load, dump
except:
    from pickle import load, dump
from json import dump as json_dump
from collections import OrderedDict

import cobra
from cobra.version import get_version
from cobra.io import read_sbml_model, write_sbml_model, save_matlab_model, \
    save_json_model
from cobra.io.sbml3 import write_sbml2

# ecoli
ecoli_model = read_sbml_model("iJO1366.xml")
with open("iJO1366.pickle", "wb") as outfile:
    dump(ecoli_model, outfile, protocol=2)

# salmonella
salmonella = read_sbml_model("salmonella.xml")
with open("salmonella.genes", "rb") as infile:
    gene_names = load(infile)
for gene in salmonella.genes:
    gene.name = gene_names[gene.id]
with open("salmonella.media", "rb") as infile:
    salmonella.media_compositions = load(infile)
with open("salmonella.pickle", "wb") as outfile:
    dump(salmonella, outfile, protocol=2)

# create mini model from textbook
    else:
        builder.display_in_browser()
except ImportError:
    print("Escher must be installed in order to visualize maps")


if __name__ == '__main__':
    import time
    from cobra.io import read_sbml_model
    from cameo import load_model

    # sbml_path = '../../tests/data/EcoliCore.xml'
    sbml_path = '../../tests/data/iJO1366.xml'

    cb_model = read_sbml_model(sbml_path)
    model = load_model(sbml_path)

    # model.solver = 'glpk'

    # print("cobra fba")
    # tic = time.time()
    # cb_model.optimize(solver='cglpk')
    # print("flux sum:", sum([abs(val) for val in list(cb_model.solution.fluxes.values())]))
    # print("cobra fba runtime:", time.time() - tic)

    # print("cobra pfba")
    # tic = time.time()
    # optimize_minimal_flux(cb_model, solver='cglpk')
    # print("flux sum:", sum([abs(val) for val in list(cb_model.solution.fluxes.values())]))
    # print("cobra pfba runtime:", time.time() - tic)
import sys, re, os, glob

from cobra.core import Model
from cobra.io import read_sbml_model, write_sbml_model

folder = sys.argv[1]
metamodel_id = sys.argv[2]
assert os.path.isdir(folder)

metamodel = Model(metamodel_id)
metamodel.description = metamodel_id

reactions = set()
models = []
for fname in glob.glob(os.path.join(folder, "*.xml")):
    model = read_sbml_model(fname)
    models.append(model)
    print "%s loaded" % model.id
    for r in model.reactions:
        r.id = re.sub('_[ec][0-9]', '', r.id)
        if r.id in reactions:
            continue
        metamodel.add_reaction(r.copy())
        reactions.add(r.id)

# write_sbml_model expects the model first and the output file name second
write_sbml_model(metamodel, metamodel_id)
def load_model(path_or_handle, solver_interface=optlang.glpk_interface, sanitize=True):
    """Read a metabolic model.

    Parameters
    ----------
    path_or_handle : path, fhandle or name.
        One of:
            * file path of a model file;
            * file handle to a SBML or pickled model; or
            * the identifier of a model in a web database (optflux.org/models)
    solver_interface : solver_interface, optional
        E.g. optlang.glpk_interface or any other optlang interface.
    sanitize : boolean, optional
        If reaction and metabolite IDs should be sanitized (works only for SBML models).
    """
    if isinstance(path_or_handle, str):
        path = path_or_handle
        try:
            handle = open(path_or_handle, 'rb')
        except IOError:
            logger.debug('%s not a file path. Querying webmodels ... trying http://bigg.ucsd.edu first' % path)
            try:
                return cameo.models.webmodels.get_model_from_bigg(path)
            except:
                logger.debug('%s not a file path. Querying webmodels ... trying minho next' % path)
                try:
                    df = cameo.models.webmodels.index_models_minho()
                except requests.ConnectionError as e:
                    logger.error("You need to be connected to the internet to load an online model.")
                    raise e
                except Exception as e:
                    logger.error("Something went wrong while looking up available webmodels.")
                    raise e
                try:
                    index = df.query('name == "%s"' % path_or_handle).id.values[0]
                    handle = cameo.models.webmodels.get_sbml_file(index)
                    path = handle.name
                except IndexError:
                    raise ValueError("%s is neither a file nor a model ID." % path)
    elif hasattr(path_or_handle, 'read'):
        path = path_or_handle.name
        handle = path_or_handle
    else:
        raise ValueError('Provided argument %s has to be either a file path or handle' % path_or_handle)
    logger.debug('Reading file from %s assuming pickled model.' % path)
    try:
        model = pickle.load(handle)
    except Exception:
        logger.debug('Cannot unpickle %s. Assuming json model next.' % path)
        try:
            model = load_json_model(path)
        except Exception:
            logger.debug("Cannot import %s as json model. Assuming sbml model next." % path)
            try:
                model = read_sbml_model(path)
            except AttributeError as e:
                logger.error("cobrapy doesn't raise a proper exception if a file does not contain an SBML model")
                raise e
            except Exception as e:
                logger.error(
                    "Looks like something blew up while trying to import {} as an SBML model. "
                    "Try validating the model at http://sbml.org/Facilities/Validator/ "
                    "to get more information.".format(path))
                raise e
    if sanitize:
        sanitize_ids(model)

    if not isinstance(model, SolverBasedModel):
        if solver_interface is not None:
            logger.debug("Changing solver interface to %s" % solver_interface)
            model = to_solver_based_model(model, solver_interface=solver_interface)
    else:
        if model.interface is not solver_interface and solver_interface is not None:
            logger.debug("Changing solver interface to %s" % solver_interface)
            model.solver = solver_interface

    return model
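# Hedged usage sketch for load_model above (not from the original source): both
# local file paths and web-database identifiers are accepted; the identifier
# below is illustrative and is resolved via the BiGG/minho lookups if no such
# file exists locally.
if __name__ == "__main__":
    model = load_model("iJO1366")
    print(model.id, len(model.reactions))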
def get_model_from_uminho(index, host="http://darwin.di.uminho.pt/models"):
    sbml_file = get_sbml_file(index, host)
    sbml_file.close()
    return to_solver_based_model(read_sbml_model(sbml_file.name))
ax2.plot(df.time, 1E6 * df.ATPPROD)
ax2.set_ylabel("reaction rate 1E-6[mmole/s]")

for ax in (ax1, ax2):
    ax.legend()
    ax.set_xlabel("time [s]")

plt.show()
f.savefig("./results/{}_{}_roadrunner.png".format(model.mid, model.version),
          bbox_inches="tight")

# -----------------------------------------------------------------------------
# fba simulation
# -----------------------------------------------------------------------------
model = read_sbml_model(tiny_sbml)
print(model)

# Iterate through the objects in the model
print("Reactions")
print("---------")
for x in model.reactions:
    print("%s : %s [%s<->%s]" % (x.id, x.reaction, x.lower_bound, x.upper_bound))

print("")
print("Metabolites")
print("-----------")
for x in model.metabolites:
    print('%9s (%s) : %s, %s, %s' % (x.id, x.compartment, x.formula, x.charge,
                                     x.annotation))
def test_sbml_error(data_directory):
    filename = join(data_directory, "invalid0.xml")
    with pytest.raises(io.sbml3.CobraSBMLError):
        io.read_sbml_model(filename)
reaction_table = read_seed_table(reactions_tab_file)
compounds_table = read_seed_table(compounds_tab_file)

######################################################
#                 PREPARE THE MODEL                  #
######################################################

if len(sys.argv) < 2:
    print "Error: no SBML file name supplied"
    sys.exit(1)
else:
    model_file = sys.argv[1]
    try:
        model = read_sbml_model(model_file)
    except:
        print "Invalid SBML for file %s" % model_file
        usage()
        sys.exit(1)

if len(sys.argv) >= 3:
    output_folder = sys.argv[2]
    if not os.path.isdir(output_folder):
        print "Invalid output folder %s" % output_folder
        usage()
        sys.exit(1)
else:
    output_folder = "./"
import pandas
from sympy import Eq

from cameo import load_model, Reaction, Model
from cameo.config import solvers
from cameo.exceptions import UndefinedSolution
from cameo.core.solver_based_model import Reaction
from cameo.util import TimeMachine

import six

TRAVIS = os.getenv('TRAVIS', False)
TESTDIR = os.path.dirname(__file__)
REFERENCE_FVA_SOLUTION_ECOLI_CORE = pandas.read_csv(
    os.path.join(TESTDIR, 'data/REFERENCE_flux_ranges_EcoliCore.csv'), index_col=0)
TESTMODEL = load_model(os.path.join(TESTDIR, 'data/EcoliCore.xml'), sanitize=False)
COBRAPYTESTMODEL = read_sbml_model(os.path.join(TESTDIR, 'data/EcoliCore.xml'))
ESSENTIAL_GENES = ['b2779', 'b1779', 'b0720', 'b0451', 'b2416', 'b2926', 'b1136', 'b2415']
ESSENTIAL_REACTIONS = ['GLNS', 'Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2', 'PIt2r',
                       'GAPD', 'ACONTb', 'EX_nh4_LPAREN_e_RPAREN_', 'ENO', 'EX_h_LPAREN_e_RPAREN_',
                       'EX_glc_LPAREN_e_RPAREN_', 'ICDHyr', 'CS', 'NH4t', 'GLCpts', 'PGM',
                       'EX_pi_LPAREN_e_RPAREN_', 'PGK', 'RPI', 'ACONTa']


class WrappedCommonGround:
    class CommonGround(unittest.TestCase):
        def setUp(self):
            self.model = TESTMODEL.copy()
            self.model.optimize()


class AbstractTestLazySolution(WrappedCommonGround.CommonGround):
    def setUp(self):
if len(sys.argv) >= 3:
    param_fname = sys.argv[2]
else:
    param_fname = './parameters.json'

try:
    settings.load_parameters(param_fname)
    print "Parameters loaded from %s " % param_fname
except Exception, e:
    print "The parameter file %s not found, running with default parameters" % param_fname
    print str(e)
    sys.exit(0)

print "Reading Metamodel", settings.METAMODEL_PATH,
metamodel = read_sbml_model(settings.METAMODEL_PATH)
print " - loaded!"

ec = '^[1-6]\.[0-9][0-9]*\.[0-9][0-9]*'
ECs_rxns = utils.read_ec_numbers(settings.RXN2ECS_PATH)
rxn2ec = {r.id: r.annotation['ec_number'] for r in metamodel.reactions
          if re.search(ec, r.annotation['ec_number'])}

######################################################
#                 PREPARE THE MODEL                  #
######################################################

try:
    model = read_sbml_model(model_file)
    print "Model %s loaded" % model.id
def example_model(test_model_files):
    return read_sbml_model(test_model_files[0]['path'])
#!/usr/bin/env python
# This script regenerates pickles of cobra Models. Should be
# performed after updating core classes to prevent subtle bugs.
from cPickle import load, dump

from cobra import Model
from cobra.version import get_version
from cobra.io import read_sbml_model, read_legacy_sbml
from cobra.test import create_test_model

model_names = ['salmonella', 'iJO1366', 'Yersinia_pestis_CO92_iPC815']

for model_name in model_names:
    model_pickle = model_name + '.pickle'
    old_model = create_test_model(model_pickle)
    if model_name == "iJO1366":
        new_model = read_legacy_sbml(model_name + '.xml')
    else:
        new_model = read_sbml_model(model_name + '.xml')
    [setattr(x, 'name', old_model.genes.get_by_id(x.id).name)
     for x in new_model.genes]
    if hasattr(old_model, 'media_compositions'):
        new_model.media_compositions = old_model.media_compositions
    new_model._cobra_version = get_version()
    dump(new_model, open(model_pickle, 'w'))
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cobra.io import read_sbml_model
from optlang import glpk_interface
import inspyred

from cameo.strain_design.heuristic.multiprocess import MultiprocessReactionKnockoutOptimization
from cameo.strain_design.heuristic.objective_functions import biomass_product_coupled_yield
from cameo.flux_analysis.simulation import fba
from cameo.core.solver_based_model import to_solver_based_model

model = read_sbml_model("../tests/data/iJO1366.xml")
model = to_solver_based_model(model, solver_interface=glpk_interface)

of = biomass_product_coupled_yield("Ec_biomass_iJO1366_core_53p95M",
                                   "EX_ac_LPAREN_e_RPAREN_",
                                   "EX_glc_LPAREN_e_RPAREN_")

mp = MultiprocessReactionKnockoutOptimization(model=model,
                                               heuristic_method=inspyred.ec.GA,
                                               objective_function=of,
                                               simulation_method=fba)

mp.run(max_evaluations=300, n=2)
def test_read_sbml_annotations(data_directory):
    """Test reading and writing annotations."""
    with open(join(data_directory, "annotation.xml"), "r") as f_in:
        model1 = read_sbml_model(f_in)
    _check_sbml_annotations(model1)