def test_model_history(tmp_path):
    """Testing reading and writing of ModelHistory."""
    model = Model("test")
    model._sbml = {
        "creators": [{
            "familyName": "Mustermann",
            "givenName": "Max",
            "organisation": "Muster University",
            "email": "*****@*****.**",
        }]
    }

    sbml_path = join(str(tmp_path), "test.xml")
    with open(sbml_path, "w") as f_out:
        write_sbml_model(model, f_out)

    with open(sbml_path, "r") as f_in:
        model2 = read_sbml_model(f_in)

    assert "creators" in model2._sbml
    assert len(model2._sbml["creators"]) == 1
    c = model2._sbml["creators"][0]
    assert c["familyName"] == "Mustermann"
    assert c["givenName"] == "Max"
    assert c["organisation"] == "Muster University"
    assert c["email"] == "*****@*****.**"
def create_model(model, constraining_dict, lethal_df, save_fn=None,
                 model_id=None, skip=[]):
    # Disregard all reactions appearing in any column of lethal_df
    lethal_reactions = list(
        set(
            list(lethal_df.loc[:, "rxn1"]) +
            list(lethal_df.loc[:, "rxn2"]) +
            list(lethal_df.loc[:, "rxn3"])))
    lethal_reactions += skip
    i = 0
    for r_id, bounds in constraining_dict.items():
        if r_id in lethal_reactions:
            continue
        i += 1
        r = model.reactions.get_by_id(r_id)
        logging.info("Change direction of reaction {0} from {1} to {2}".format(
            r_id, r.bounds, bounds))
        r.bounds = bounds
        logging.debug(model.optimize())
    logging.info(
        "Changed the reversibility of {0} reactions in total".format(i))
    if model_id:
        model.id = model_id
    if save_fn:
        write_sbml_model(model, save_fn)
    return model
def check_feasibility(metabolic_model, time_limit=60,
                      tolerance_feasibility=1e-6, pool=None, debug=True):
    # Some solvers may crash Python with infeasible parameters; running the
    # check in a separate process prevents that error from crashing the main
    # process.
    # model_copy = copy.deepcopy(metabolic_model)
    if pool is None:
        pool = Pool(processes=1)
    task_start = time.time()  # start time
    task = pool.map_async(get_status, [{
        "model": metabolic_model,
        "tolerance_feasibility": tolerance_feasibility
    }])
    while not task.ready():
        # print(task._number_left)
        if (time.time() - task_start) > time_limit:  # check maximum time (user def.)
            print("timeout")
            pool.terminate()  # kill old pool
            timeout = True  # redo computation
            pool = Pool(processes=1)
            if debug:
                print("infeasible")
                write_sbml_model(metabolic_model, "feasibility.sbml")
            return "infeasible", pool
    status = task.get()[0]
    # pool.close()
    if debug:
        print(status)
    return status, pool
def test_notes(tmp_path):
    """Testing if model notes are written in SBML."""
    path_to_file = join(str(tmp_path), "model_notes.xml")

    # making a minimal cobra model to test notes
    model = cobra.Model("e_coli_core")
    model.notes["Remark"] = "...Model Notes..."
    met = cobra.Metabolite("pyr_c", compartment="c")
    model.add_metabolites([met])
    met.notes["Remark"] = "Note with \n newline"
    rxn = cobra.Reaction("R_ATPM")
    model.add_reactions([rxn])
    rxn.notes["Remark"] = "What about me?"
    model.objective_direction = "max"
    model.objective = rxn
    write_sbml_model(model, path_to_file)

    # reading the model back
    model_after_reading = read_sbml_model(path_to_file)
    met_after_reading = model_after_reading.metabolites.get_by_id("pyr_c")
    reaction_after_reading = model_after_reading.reactions.get_by_id("R_ATPM")

    # checking if notes are written to model
    assert model_after_reading.notes["Remark"] == "...Model Notes..."

    # checking notes for metabolite and reaction
    assert met_after_reading.notes["Remark"] == "Note with \n newline"
    assert reaction_after_reading.notes["Remark"] == "What about me?"
def save_model_to_file(model, filename):
    """Save a model to a file based on the extension of the file name.

    Parameters
    ----------
    model : cobra.core.Model
        Model object loaded from file
    filename : str
        Path to model file

    Raises
    ------
    IOError
        If model file extension is not supported.
    """
    (root, ext) = splitext(filename)
    if ext == '.mat':
        save_matlab_model(model, filename)
    elif ext == '.xml' or ext == '.sbml':
        write_sbml_model(model, filename)
    elif ext == '.json':
        save_json_model(model, filename)
    else:
        raise IOError(
            'Model file extension not supported for {0}'.format(filename))
    return
def _removeDeadEnd(sbml_path):
    cobraModel = cobra_io.read_sbml_model(sbml_path, use_fbc_package=True)
    cobraModel = _reduce_model(cobraModel)
    with TemporaryDirectory() as tmpOutputFolder:
        cobra_io.write_sbml_model(cobraModel,
                                  os_path.join(tmpOutputFolder, 'tmp.xml'))
        rpsbml = rpSBML(os_path.join(tmpOutputFolder, 'tmp.xml'))
        return rpsbml
def test_stable_gprs(data_directory, tmp_path):
    mini = read_sbml_model(join(data_directory, "mini_fbc2.xml"))
    mini.reactions.GLCpts.gene_reaction_rule = "((b2415 and b2417)or (b2416))"

    fixed = join(str(tmp_path), "fixed_gpr.xml")
    write_sbml_model(mini, fixed)
    fixed_model = read_sbml_model(fixed)
    assert (fixed_model.reactions.GLCpts.gene_reaction_rule ==
            "(b2415 and b2417) or b2416")
def write_yeast_model(model):
    """Writes the SBML file of the yeast model using COBRA.

    Parameters
    ----------
    model : cobra.core.Model
        Yeast model to be written
    """
    write_sbml_model(model, MODEL_PATH)
def raise_libsbml_errors():
    with pytest.raises(ImportError):
        io.read_sbml_model('test')
    with pytest.raises(ImportError):
        io.write_sbml_model(None, 'test')
    with pytest.raises(ImportError):
        io.load_matlab_model('test')
    with pytest.raises(ImportError):
        io.write_legacy_sbml(None, 'test')
    with pytest.raises(ImportError):
        io.read_legacy_sbml(None, 'test')
def test_read_write_sbml_annotations(data_directory, tmp_path):
    """Test reading and writing annotations."""
    with open(join(data_directory, "annotation.xml"), "r") as f_in:
        model1 = read_sbml_model(f_in)

    sbml_path = join(str(tmp_path), "test.xml")
    with open(sbml_path, "w") as f_out:
        write_sbml_model(model1, f_out)

    with open(sbml_path, "r") as f_in:
        model2 = read_sbml_model(f_in)
    _check_sbml_annotations(model2)
def update_local_models(model_id, model_store=None):
    """Update locally stored models.

    Annotate model metabolites with CHEBI identifiers and store them locally
    for easy access.

    :param model_id: string, model identifier
    :param model_store: path to directory where to store the processed models.
    """
    model_store = model_store or 'data/models'
    if model_id in LOCAL_MODELS:
        sbml_file = os.path.join(model_store, 'original',
                                 model_id + '.sbml.gz')
        model = read_sbml_model(sbml_file)
    else:
        model = load_model(model_id)

    # annotate metabolites
    namespace = storage.get(model_id).namespace
    metabolite_namespace = MODEL_METABOLITE_NAMESPACE[model_id]
    db_name = 'CHEBI'
    metabolites_missing_annotation = [
        m.id for m in model.metabolites
        if len(m.annotation.get(db_name, [])) < 1
    ]
    model_xref = sync_query_identifiers([
        strip_compartment[namespace](mid)
        for mid in metabolites_missing_annotation
    ], namespace, db_name)
    for metabolite_id in metabolites_missing_annotation:
        compound_id = strip_compartment[namespace](metabolite_id)
        if compound_id in model_xref:
            metabolite = model.metabolites.get_by_id(metabolite_id)
            if db_name not in metabolite.annotation:
                metabolite.annotation[db_name] = []
            metabolite.annotation[db_name].extend([
                f'{db_name}:{i}' if not i.startswith(f"{db_name}:") else i
                for i in model_xref[compound_id]
            ])
            # TODO: For some reason, id-mapper doesn't make this link,
            # add manually for now
            if compound_id in GLUCOSE and db_name == 'CHEBI':
                metabolite.annotation[db_name].append('CHEBI:42758')
            if metabolite_namespace not in metabolite.annotation:
                metabolite.annotation[metabolite_namespace] = []
            metabolite.annotation[metabolite_namespace].append(compound_id)

    # gecko protein exchanges
    db_name = 'uniprot'
    protein_exchanges = model.reactions.query(
        lambda rxn: re.match(r'^prot_.*_exchange$', rxn.id))
    for rxn in protein_exchanges:
        rxn.annotation[db_name] = [
            re.findall('^prot_(.*)_exchange$', rxn.id)[0]
        ]

    write_sbml_model(model,
                     os.path.join(model_store, model_id + '.sbml.gz'))
def main(args):
    '''main method.'''
    model = read_sbml_model(args[0])

    # Create updated model:
    build(model)
    add_creator(model, *args[3:7])

    # Write updated model:
    out_filename = os.path.join(args[1], '%s.xml' % args[2])
    makedirs(out_filename)
    write_sbml_model(model, out_filename)
def test_create_universal(self, test_folder):
    universal = cobrababel.create_metanetx_universal_model()
    assert universal.id == 'metanetx_universal'
    assert len(universal.reactions) >= 42952
    assert len(universal.metabolites) >= 31130

    file_name = join(test_folder, 'metanetx.xml')
    write_sbml_model(universal, file_name)
    model, errors = validate_sbml_model(file_name)
    assert len(errors['other']) == 0
    assert len(errors['SBML errors']) == 0
    assert len(errors['warnings']) == 0
    assert len(errors['validator']) >= 325
    unlink(file_name)
def test_load_json_model_valid(data_directory, tmp_path):
    """Test loading a valid annotation from JSON."""
    path_to_file = join(data_directory, "valid_annotation_format.json")
    model = load_json_model(path_to_file)
    expected = {
        "bigg.reaction": [["is", "PFK26"]],
        "kegg.reaction": [["is", "R02732"]],
        "rhea": [["is", "15656"]],
    }
    for metabolite in model.metabolites:
        assert metabolite.annotation == expected
    path_to_output = join(str(tmp_path), "valid_annotation_output.xml")
    write_sbml_model(model, path_to_output)
def excel_sbml(modelname, outputname):
    import datetime
    import cobra
    from cobra import Metabolite, Reaction, Model
    from cobra.io import write_sbml_model
    # from __future__ import print_function
    import pandas as pd

    # myfolder = '/media/jupyter/zhang_x/study/'
    # mname = "seed_1"
    # To include additional metabolite information, create the metabolite
    # objects first and add them to the model:
    # A = Metabolite('A')
    # model.add_metabolites([A, B, C, D, E, P])
    starttime = datetime.datetime.now()
    model = Model('model')
    colnames = ['id', 'reaction_eq', 'name', 'lower_bound', 'upper_bound',
                'Object']
    # Change these values to the corresponding column headers of the Excel
    # sheet so the values are read correctly. The sheet does not have to
    # contain every column, and the column order does not matter because
    # columns are identified by their headers.
    # objr = 'Objective'  # name of the objective reaction
    # rin = [['PGI', -10, 10]]  # several uptake reactions and their bounds can be set
    # If the sheet has no objective-reaction data it must be given manually,
    # and constraints on uptake reactions must likewise be added manually; if
    # the data are already in the sheet these two values can stay unchanged.
    # data = pd.read_csv(mname + '.csv', delimiter=",", na_values=['(none)']).fillna('')
    data = pd.read_excel(modelname, sheet_name='reactions', header=0,
                         na_values=['(none)']).fillna('')
    # '(none)' becomes NaN, and NaN values are filled with ''.
    # print(data)
    # Reading directly from the Excel file avoids converting to txt first;
    # `header` marks the header row / start of the data (earlier rows may hold
    # model notes). Empty cells are replaced with empty strings for easier
    # handling; dropna cannot be used, otherwise whole rows would be lost.
    keys = data.keys()
    for index, row in data.iterrows():
        r = Reaction(row[colnames[0]].strip())  # one Reaction object per row
        model.add_reaction(r)
        r.build_reaction_from_string(row[colnames[1]], fwd_arrow='-->',
                                     rev_arrow='<--', reversible_arrow='<=>',
                                     term_split='+')
        # parse the equation string in colnames[1] into the reaction
        if colnames[2] in keys:
            r.subsystem = row[colnames[2]]
        if colnames[3] in keys:
            r.lower_bound = row[colnames[3]]
            r.upper_bound = row[colnames[4]]
        if colnames[5] in keys:  # objective reaction
            if row[colnames[5]]:
                r.objective_coefficient = row[colnames[5]]
        # if colnames[5] in keys:
        #     if row[colnames[5]]:
        #         r.objective_coefficient = 1
        # s = row[colnames[6]]
        # Gene-association handling depends on how the and/or relations are
        # stored in the sheet:
        # if s:
        #     genes = s.split(", ")
        #     r.gene_reaction_rule = '(' + " or ".join(genes) + ' )'
    # If the sheet has no bound columns, uptake-reaction bounds must be set
    # manually; whether to change the lower or upper bound, and the sign of
    # the value, depends on the reaction direction:
    # if colnames[3] not in keys:
    #     for r in rin:
    #         rea = model.reactions.get_by_id(r[0])
    #         rea.lower_bound = r[1]
    #         rea.upper_bound = r[2]
    write_sbml_model(model, outputname)
def test_filehandle(data_directory, tmp_path):
    """Test reading and writing to file handle."""
    with open(join(data_directory, "mini_fbc2.xml"), "r") as f_in:
        model1 = read_sbml_model(f_in)
        assert model1 is not None

    sbml_path = join(str(tmp_path), "test.xml")
    with open(sbml_path, "w") as f_out:
        write_sbml_model(model1, f_out)

    with open(sbml_path, "r") as f_in:
        model2 = read_sbml_model(f_in)

    TestCobraIO.compare_models(name="filehandle", model1=model1,
                               model2=model2)
def mvgem(input_model, output_model):
    """Convert GEM format INPUT_MODEL to OUTPUT_MODEL.

    The format will be inferred based on the extension of OUTPUT_MODEL.
    Supported formats: .sbml, .xml, .json, .mat
    """
    model = load_model(input_model)
    out_format = get_destination_format(output_model)
    if out_format == "SBML":
        write_sbml_model(model, output_model)
    elif out_format == "MAT":
        save_matlab_model(model, output_model)
    elif out_format == "JSON":
        save_json_model(model, output_model)
    else:
        click.BadParameter(
            "Output format %s could not be recognised" % out_format
        ).show()
def test_gprs(data_directory, tmp_path):
    """Test that GPRs are written and read correctly."""
    model1 = read_sbml_model(join(data_directory, "iJO1366.xml.gz"))

    sbml_path = join(str(tmp_path), "test.xml")
    with open(sbml_path, "w") as f_out:
        write_sbml_model(model1, f_out)

    with open(sbml_path, "r") as f_in:
        model2 = read_sbml_model(f_in)

    for r1 in model1.reactions:
        rid = r1.id
        r2 = model2.reactions.get_by_id(rid)
        gpr1 = r1.gene_reaction_rule
        gpr2 = r2.gene_reaction_rule
        assert gpr1 == gpr2
def test_groups(data_directory, tmp_path):
    """Testing reading and writing of groups."""
    sbml_path = join(data_directory, "e_coli_core.xml")
    model = read_sbml_model(sbml_path)
    assert model.groups is not None
    assert len(model.groups) == 10
    g1 = model.groups[0]
    assert len(g1.members) == 6

    temp_path = join(str(tmp_path), "test.xml")
    with open(temp_path, "w") as f_out:
        write_sbml_model(model, f_out)

    with open(temp_path, "r") as f_in:
        model2 = read_sbml_model(f_in)

    assert model2.groups is not None
    assert len(model2.groups) == 10
    g1 = model2.groups[0]
    assert len(g1.members) == 6
def create_sbml(reaction_genes, reactions_metabolites, output_file):
    model = Model()
    # keep track of metabolite objects already created so they are reused
    # across reactions instead of being instantiated again
    metabolites_created = {}
    for reaction_id in reaction_genes:
        reaction = Reaction(reaction_id)
        reaction.name = reaction_id
        reaction_metabolites = {}
        for reactant_id in reactions_metabolites[reaction_id][0]:
            if reactant_id not in metabolites_created:
                metabolites_created[reactant_id] = Metabolite(
                    reactant_id, compartment='c')
            reaction_metabolites[metabolites_created[reactant_id]] = -1.0
        for product_id in reactions_metabolites[reaction_id][1]:
            if product_id not in metabolites_created:
                metabolites_created[product_id] = Metabolite(
                    product_id, compartment='c')
            reaction_metabolites[metabolites_created[product_id]] = 1.0
        reaction.add_metabolites(reaction_metabolites)
        reaction.notes['GENE_ASSOCIATION'] = (
            '(' + ' or '.join(reaction_genes[reaction_id]) + ')')
        model.add_reactions([reaction])

    write_sbml_model(model, output_file)
def test_infinity_bounds(data_directory, tmp_path):
    """Test infinity bound example."""
    sbml_path = join(data_directory, "fbc_ex1.xml")
    model = read_sbml_model(sbml_path)

    # check that simulation works
    solution = model.optimize()

    # check that values are set
    r = model.reactions.get_by_id("EX_X")
    assert r.lower_bound == -float("Inf")
    assert r.upper_bound == float("Inf")

    temp_path = join(str(tmp_path), "test.xml")
    with open(temp_path, "w") as f_out:
        write_sbml_model(model, f_out)

    with open(temp_path, "r") as f_in:
        model2 = read_sbml_model(f_in)

    r = model2.reactions.get_by_id("EX_X")
    assert r.lower_bound == -float("Inf")
    assert r.upper_bound == float("Inf")
def main(args):
    '''main method.'''
    model = read_sbml_model(args[0])

    # Get biomass MW:
    print(get_mw(model, 's_0450__91__c__93__'))

    # Create updated model:
    build(model)
    add_creator(model, *args[3:7])

    # Write updated model:
    makedirs(args[1])
    write_sbml_model(model, os.path.join(args[1], '%s.xml' % args[2]))
    to_df(model).to_csv(os.path.join(args[1], '%s.csv' % args[2]))

    # Simulate updated model:
    react_flux_df = simulate(model, args[1])

    # Save and plot:
    react_flux_df.to_csv(os.path.join(args[1], '%s.csv' % react_flux_df.name))
    plot(react_flux_df, 'flux / mmol h-1',
         os.path.join(args[1], '%s.png' % react_flux_df.name))
print("\n check reaction mass balance")
fixed_mass_balance = open("fixed_mass_balance.txt", "w")
for r in model.reactions:
    if r.check_mass_balance() != [] and r.id[:3] != "EX_":
        if not cyc.fix_mass_balance(r, model, fixed_mass_balance):
            print("\t", r, "is not balanced!")
            print(r.id, r.name, r.check_mass_balance(), file=mass_balance)

for pwy in p_ignored_set:
    print(pwy, org.get_name_string(pwy), p_ignored_set[pwy], file=p_ignored)
    p_generic_set[org.get_name_string(pwy)] = len(p_ignored_set[pwy])

for s in sorted(m_generic_set.items(), key=operator.itemgetter(1)):
    print(s[0], s[1], file=m_generic)
for s in sorted(p_generic_set.items(), key=operator.itemgetter(1)):
    print(s[0], s[1], file=m_generic)

print("\n---\n%i reactions in model" % len(model.reactions))
print("%i metabolites in model" % len(model.metabolites))
print("%i genes in model\n---\n" % len(model.genes))
print("reactions in database:", r_total)
print("generic reactions:", r_generic)
print("generic metabolites:", len(m_generic_set.keys()))
print("ignored reactions:", len(r_ignored_set))
print("thus incomplete pathways:", len(p_ignored_set), "\n")

sbml_out_file = answer_org + ".xml"
write_sbml_model(model, sbml_out_file, use_fbc_package=False)
from cobra.test import create_test_model

model_names = ['salmonella', 'iJO1366', 'Yersinia_pestis_CO92_iPC815']
for model_name in model_names:
    # read in old pickle and model from sbml
    model_pickle = model_name + '.pickle'
    if model_name == "iJO1366":
        new_model = read_legacy_sbml(model_name + '.xml')
    else:
        new_model = read_sbml_model(model_name + '.xml')

    # update other attributes
    if isfile(model_name + ".genes"):
        with open(model_name + ".genes", "rb") as infile:
            gene_names = load(infile)
        for gene in new_model.genes:
            gene.name = gene_names[gene.id]
    if isfile(model_name + ".media"):
        with open(model_name + ".media", "rb") as infile:
            new_model.media_compositions = load(infile)
    new_model._cobra_version = get_version()

    # write out new pickle
    with open(model_pickle, 'wb') as outfile:
        dump(new_model, outfile, protocol=2)

    # write out other formats for iJO1366
    if model_name == "iJO1366":
        save_matlab_model(new_model, model_name + ".mat")
        save_json_model(new_model, model_name + ".json")
    if model_name == "salmonella":
        write_sbml_model(new_model, model_name + "_fbc.xml")
for g in mini.genes:
    try:
        tg = textbook.genes.get_by_id(g.id)
    except KeyError:
        continue
    g.name = tg.name
    g.annotation = tg.annotation
mini.reactions.sort()
mini.genes.sort()
mini.metabolites.sort()

# output to various formats
with open("mini.pickle", "wb") as outfile:
    dump(mini, outfile, protocol=2)
save_matlab_model(mini, "mini.mat")
save_json_model(mini, "mini.json", pretty=True)
write_sbml_model(mini, "mini_fbc2.xml")
write_sbml_model(mini, "mini_fbc2.xml.bz2")
write_sbml_model(mini, "mini_fbc2.xml.gz")
write_sbml2(mini, "mini_fbc1.xml", use_fbc_package=True)
write_sbml_model(mini, "mini_cobra.xml", use_fbc_package=False)

raven = load_matlab_model("raven.mat")
with open("raven.pickle", "wb") as outfile:
    dump(raven, outfile, protocol=2)

# fva results
fva_result = cobra.flux_analysis.flux_variability_analysis(textbook)
clean_result = OrderedDict()
for key in sorted(fva_result):
    clean_result[key] = {k: round(v, 5) for k, v in fva_result[key].items()}
with open("textbook_fva.json", "w") as outfile:
    json_dump(clean_result, outfile)
def excel_to_sbml(file_excel_path, file_sbml_path, model_id="default_model",
                  **kwargs):
    write_sbml_model(import_excel_model(file_excel_path, model_id),
                     file_sbml_path, **kwargs)
# ASSERTION 2: Adding some dummy carbon source (glc, fum, pyr) => there should be growth
sol = set_medium(tmpmod, minmed + ["cpd00027", "cpd00020", "cpd00106"],
                 verbose=False).optimize()
print("Positive growth control", sol.status, sol.objective_value)
if sol.status == "optimal" and round(sol.objective_value, 3) < 0:
    print("No growth possible, check minimal medium?")
    sys.exit()

# 5) Try gapfilling for each exchange reaction and carbon source
print("\nStarting gapfilling")
tmpmod = set_medium(tmpmod, minmed, verbose=False)
debugmod = tmpmod.copy()
debugmod.add_reactions(
    [r for r in refmod.reactions if r not in debugmod.reactions])
write_sbml_model(debugmod, filename=fileID + "_debug.xml")
Nrea = len(tmpmod.reactions)
s1 = [ex for ex in substances["exid_seed"].dropna().values]
s2 = [ex.id for ex in tmpmod.exchanges]
csources = set(s1 + s2)
tmpmod = add_Exchanges(tmpmod, set(s1).difference(set(s2)))  # add exchange reactions
newmod = add_Exchanges(newmod, set(s1).difference(set(s2)))  # add exchange reactions
Nfix = 0
for cs in csources:
    csname = list(tmpmod.reactions.get_by_id(cs).metabolites)[0].name
    med = minmed + [cs]
    tmpmod = set_medium(tmpmod, med, verbose=False)
    sol = tmpmod.slim_optimize()
    if round(sol, 6) > 0:
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import sys, re, os, glob

from cobra.core import Model
from cobra.io import read_sbml_model, write_sbml_model

folder = sys.argv[1]
metamodel_id = sys.argv[2]
assert os.path.isdir(folder)

metamodel = Model(metamodel_id)
metamodel.description = metamodel_id

reactions = set()
models = []
for fname in glob.glob(os.path.join(folder, "*.xml")):
    model = read_sbml_model(fname)
    models.append(model)
    print("%s loaded" % model.id)
    for r in model.reactions:
        r.id = re.sub('_[ec][0-9]', '', r.id)
        if r.id in reactions:
            continue
        metamodel.add_reaction(r.copy())
        reactions.add(r.id)

# write_sbml_model takes the model first, then the output path
write_sbml_model(metamodel, metamodel_id + ".xml")
def test_sbml_write(self):
    test_output_filename = join(gettempdir(), 'test_sbml_write.xml')
    io.write_sbml_model(self.model, test_output_filename)
    # cleanup the test file
    unlink(test_output_filename)
mini.reactions.get_by_id(i).lower_bound = mini.reactions.PGI.lower_bound

# set names and annotation
for g in mini.genes:
    try:
        tg = textbook.genes.get_by_id(g.id)
    except KeyError:
        continue
    g.name = tg.name
    g.annotation = tg.annotation
mini.reactions.sort()
mini.genes.sort()
mini.metabolites.sort()

# output to various formats
with open("mini.pickle", "wb") as outfile:
    dump(mini, outfile, protocol=2)
save_matlab_model(mini, "mini.mat")
save_json_model(mini, "mini.json", pretty=True)
write_sbml_model(mini, "mini_fbc2.xml")
write_sbml_model(mini, "mini_fbc2.xml.bz2")
write_sbml_model(mini, "mini_fbc2.xml.gz")
write_sbml2(mini, "mini_fbc1.xml", use_fbc_package=True)
write_sbml_model(mini, "mini_cobra.xml", use_fbc_package=False)

# fva results
fva_result = cobra.flux_analysis.flux_variability_analysis(textbook)
clean_result = OrderedDict()
for key in sorted(fva_result):
    clean_result[key] = {k: round(v, 5) for k, v in fva_result[key].items()}
with open("textbook_fva.json", "w") as outfile:
    json_dump(clean_result, outfile)
        if len(gapsol[0]) > 0:
            Cfix += 1
            print("\t => could be fixed:",
                  ",".join([r.id for r in gapsol[0]]))
            modnew.add_reactions(
                [r for r in gapsol[0] if r not in modnew.reactions])
    print("\nTotal compounds:", Call, "\t can be produced:", Cwork,
          "\t could be fixed", Cfix, "\t altogether:",
          round(100 * float(Cwork + Cfix) / Call, 1), "%",
          " (before:", round(100 * float(Cwork) / Call, 1), "% )")
    if fill_gaps:
        return modnew


mod.add_reactions([ex for ex in refmod.exchanges if ex not in mod.reactions])
checkProduction(mod, "atp[c]")
sol = mod.optimize()
if sol.status != "optimal" or round(sol.objective_value, 6) == 0:
    print("no biomass production possible, try to fix")
    tmpmod = mod.copy()
    for ex in tmpmod.exchanges:
        ex.lower_bound = -1000
    gapsol = GapFiller(tmpmod, refmod, demand_reactions=False,
                       integer_threshold=1e-16).fill()
    print(gapsol)

modnew = checkBiomass(mod, fill_gaps=True)

fileID = os.path.splitext(os.path.basename(sys.argv[1]))[0]
write_sbml_model(modnew, filename=fileID + "_gapfilled.xml")
### Change the MNXM IDs to Bigg IDs
for met in model.metabolites:
    if "bigg.metabolite" not in met.annotation.keys():
        continue
    if met.id in [
            "cpd00261_c0", "cpd06227_c0", "cpd03572_c0", "cpd02446_c0",
            "cpd02572_c0", "cpd01466_c0"
    ]:
        pass
    else:
        # if met.id.startswith('cpd'):
        split, compartment = met.id.split("_")
        met.id = met.annotation['bigg.metabolite'] + '_' + compartment
        model.repair()
        print(met.id)

# Number of metabolites without Bigg ID
not_in_bigg = []
for met in model.metabolites:
    if "bigg.metabolite" not in met.annotation:
        not_in_bigg.append(met.id)
len(not_in_bigg)

### Save draft with mapped Bigg IDs
write_sbml_model(model, "/Users/lizrad/Dev/iVnat/iVnat.xml")
def test_sbml_write(self):
    test_output_filename = "test_sbml_write.xml"
    io.write_sbml_model(self.model, test_output_filename)
    # cleanup the test file
    unlink(test_output_filename)
clean = True
if clean:
    aux = consistent_model.copy()
    biomass = aux.reactions.get_by_id(biomass.id)
    aux.remove_reactions([
        r for r in aux.reactions
        if r.id.startswith('EFF')
        and list(r._metabolites.keys())[0] in biomass.metabolites
    ])
    new_blocked_reactions = find_blocked_reactions(aux)
    if len(new_blocked_reactions) == 0:
        print("Cleaning model")
        consistent_model = aux
        gapfilling_reactions = [
            r for r in gapfilling_reactions
            if r in consistent_model.reactions
        ]

final_model_reactions = {r.id for r in consistent_model.reactions}

sbml_out = '.'.join([model.id, settings.OUTPUT_SUFIX, "xml"])
sbml_out = os.path.join(settings.OUTPUT_FOLDER, sbml_out)
write_sbml_model(consistent_model, sbml_out, use_fbc_package=False)
print("Curated model %s saved as %s" % (model.id, sbml_out))

print("=====================================================")
print("MODEL_ID\tRXN_initial\tBLK_curated\tBLK_Removed\tGF\tRXN_final")
print("%s\t%i\t%i\t%i\t%i\t%i" % (
    consistent_model.id,
    len(initial_reactions),
    len(blocked_curated),
    len(initial_reactions - final_model_reactions),
    len(gapfilling_reactions),
    len(final_model_reactions)
))
print("=====================================================")