def create_species_sbml(metabolites, outputfile):
    """Create an SBML file with a list of species containing the metabolites of the input set.

    Args:
        metabolites (set): set of metabolites
        outputfile (str): SBML file to be written
    """
    document = libsbml.SBMLDocument(2, 1)
    model = document.createModel("metabolites")
    for compound in metabolites:
        compound = compound.strip('"')
        name, stype, comp = convert_from_coded_id(compound)
        s = model.createSpecies()
        sbmlGenerator.check(s, 'create species')
        sbmlGenerator.check(s.setId(compound), 'set species id')
        # Add name and compartment if found by padmet
        if name is not None:
            sbmlGenerator.check(s.setName(name), 'set species name')
        else:
            logger.warning("No name for " + compound)
        if comp is not None:
            sbmlGenerator.check(s.setCompartment(comp), 'set species compartment')
        else:
            logger.warning("No compartment for " + compound)
    libsbml.writeSBMLToFile(document, outputfile)
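# A minimal, self-contained sketch of the same write pattern using plain libsbml
# only (no padmet helpers such as convert_from_coded_id or sbmlGenerator.check);
# the species and compartment ids below are hypothetical examples.
import libsbml

def _write_single_species_example(outputfile="species_example.xml"):
    document = libsbml.SBMLDocument(2, 1)
    model = document.createModel("metabolites")
    c = model.createCompartment()
    c.setId("c")                   # hypothetical compartment id
    s = model.createSpecies()
    s.setId("M_glc__D_c")          # hypothetical species id
    s.setName("D-glucose")
    s.setCompartment("c")
    libsbml.writeSBMLToFile(document, outputfile)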
def setBounds(self, bounds_path, model_path):
    reader = libsbml.SBMLReader()
    document = reader.readSBML(model_path)
    model = document.getModel()
    f = open(bounds_path)
    for s in f:
        name, idreaction, model_name, lb, ub = s.strip().split("\t")
        name = name.replace("-", "")
        reaction = model.getReaction(name)
        if reaction is None:
            # test if there is a reaction starting with a number;
            # "N" was added to these in network2sbml.py
            reaction = model.getReaction("N" + name)
        if reaction is not None:
            law = reaction.createKineticLaw()
            if model.getLevel() == 2:
                lbParameter = law.createParameter()
                ubParameter = law.createParameter()
            else:
                lbParameter = law.createLocalParameter()
                ubParameter = law.createLocalParameter()
            lbParameter.setId("LOWER_BOUND")
            lbParameter.setValue(float(lb))
            ubParameter.setId("UPPER_BOUND")
            ubParameter.setValue(float(ub))
            if float(ub) > 0 and float(lb) < 0:
                reaction.setReversible(True)
            else:
                reaction.setReversible(False)
    f.close()
    libsbml.writeSBMLToFile(document, model_path)
def renameSId(filename, oldSId, newSId, output_file):
    if oldSId == newSId:
        print("The Ids are identical, renaming stopped.")
        return
    if not libsbml.SyntaxChecker.isValidInternalSId(newSId):
        print("The new SId '{0}' does not represent a valid SId.".format(newSId))
        return
    document = libsbml.readSBMLFromFile(filename)
    errors = document.getNumErrors(libsbml.LIBSBML_SEV_ERROR)
    if errors > 0:
        document.printErrors()
        return
    # find element with the old id
    element = document.getElementBySId(oldSId)
    if element is None:
        print("Found no element with SId '{0}'".format(oldSId))
        return
    # found element -> renaming
    element.setId(newSId)
    # update all references to this element
    allElements = document.getListOfAllElements()
    for i in range(allElements.getSize()):
        allElements.get(i).renameSIdRefs(oldSId, newSId)
    # write to file
    libsbml.writeSBMLToFile(document, output_file)
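# Hedged usage sketch for renameSId above; the file names and SIds are
# hypothetical placeholders.
renameSId("model.xml", "S1", "glucose", "model_renamed.xml")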
def extract_rxn_with_gene_assoc(sbml, output, verbose=False):
    """
    From a given SBML document, create an SBML file with only the reactions
    associated to a gene.
    Each kept reaction needs a 'GENE_ASSOCIATION' entry in its 'notes' section.

    Parameters
    ----------
    sbml: str
        path to the input SBML file
    output: str
        pathname of the output SBML file
    """
    reader = libsbml.SBMLReader()
    sbml_document = reader.readSBML(sbml)
    for i in range(sbml_document.getNumErrors()):
        print(sbml_document.getError(i).getMessage())
    sbml_model = sbml_document.getModel()
    listOfReactions = sbml_model.getListOfReactions()
    reactions_to_remove = []
    for reaction in listOfReactions:
        if "GENE_ASSOCIATION" not in list(parseNotes(reaction).keys()):
            reactions_to_remove.append(reaction.getId())
    for rId in reactions_to_remove:
        listOfReactions.remove(rId)
    libsbml.writeSBMLToFile(sbml_document, output)
def add_uncertainty_example(tmp: bool = False) -> None: """Add uncertainty to a model.""" doc: libsbml.SBMLDocument = libsbml.readSBMLFromFile( str(RESOURCES_DIR / "distrib" / "e_coli_core.xml")) # activate distrib doc.enablePackage( "http://www.sbml.org/sbml/level3/version1/distrib/version1", "distrib", True) doc.setPackageRequired("distrib", True) model: libsbml.Model = doc.getModel() model_fbc: libsbml.FbcModelPlugin = model.getPlugin("fbc") # write gene expression data gp = model_fbc.getGeneProduct(0) gp_distrib: libsbml.DistribSBasePlugin = gp.getPlugin("distrib") if gp_distrib: uncertainty: libsbml.Uncertainty = gp_distrib.createUncertainty() up_mean: libsbml.UncertParameter = uncertainty.createUncertParameter() up_mean.setType(libsbml.DISTRIB_UNCERTTYPE_MEAN) up_mean.setValue(2.5) else: logger.error("DistribSBasePlugin not working for fbc:GeneProduct.") # store model with gene expression data if tmp: with tempfile.NamedTemporaryFile(suffix=".xml") as f_sbml: libsbml.writeSBMLToFile(doc, f_sbml.name) else: libsbml.writeSBMLToFile( doc, str(RESOURCES_DIR / "distrib" / "e_coli_core_expression.xml"))
def create_sbml_model( initial_assignments, parameters, rate_rules, species, to_file: str = None, ): """Create an SBML model from simple definitions. See the model definitions and usage in :py:func:`model` for example input. The default initial concentration of species is `1.0`. This can currently be changed by specifying an initial assignment. """ document = libsbml.SBMLDocument(3, 1) model = document.createModel() compartment = model.createCompartment() compartment.setId('compartment') compartment.setConstant(True) compartment.setSize(1) compartment.setSpatialDimensions(3) compartment.setUnits('dimensionless') for species_id in species: species = model.createSpecies() species.setId(species_id) species.setCompartment('compartment') species.setConstant(False) species.setSubstanceUnits('dimensionless') species.setBoundaryCondition(False) species.setHasOnlySubstanceUnits(False) species.setInitialConcentration(1.0) for target, formula in initial_assignments.items(): initial_assignment = model.createInitialAssignment() initial_assignment.setSymbol(target) initial_assignment.setMath(libsbml.parseL3Formula(formula)) for target, formula in rate_rules.items(): rate_rule = model.createRateRule() rate_rule.setVariable(target) rate_rule.setMath(libsbml.parseL3Formula(formula)) for parameter_id, parameter_value in parameters.items(): parameter = model.createParameter() parameter.setId(parameter_id) parameter.setConstant(True) parameter.setValue(parameter_value) parameter.setUnits('dimensionless') if to_file: libsbml.writeSBMLToFile( document, str(to_file), ) # Need to return document, else AMICI throws an error. # (possibly due to garbage collection?) return document, model
def setFormulas(self, formulas_path, model_path): reader = libsbml.SBMLReader() document = reader.readSBML(model_path) model = document.getModel() f = open(formulas_path) for s in f: if not s.startswith("#"): molid, formula = s.strip().split("\t") species = model.getSpecies(molid) if species is not None: note = """ <notes> <body xmlns="http://www.w3.org/1999/xhtml"> <p>FORMULA:%s</p> </body> </notes> """ % (formula.replace(":","").replace(",","")) if species.isSetNotes(): species.appendNotes(note) else: species.setNotes(note) if not species.isSetNotes(): print "Failed to assign note to %s:\n%s" % (molid,note) f.close() libsbml.writeSBMLToFile(document, model_path)
def _adapt_sbml_file(sbml_file): # create temporary filename for an adapted sbml file for Copasi temp = sbml_file.split('.') temp_file_name = temp[0] + '_deleteme.xml' # get the sbml model, adapt it for Copasi sbml_model = (libsbml.readSBML(sbml_file)).getModel() sbml_model_name = sbml_model.getName() if sbml_model_name == '': sbml_model_name = (sbml_file.split('.')[0]).split('/')[-1] sbml_model.setName(sbml_model_name) output_table = [] for compartment in sbml_model.getListOfCompartments(): # Copasi prioritizes species names over IDs # We have to remove them to user IDs as identifiers in Copasi compartment.unsetName() for species in sbml_model.getListOfSpecies(): # Copasi prioritizes species names over IDs # We have to remove them to user IDs as identifiers in Copasi species.unsetMetaId() species.unsetName() output_table.append((species.getCompartment(), species.getId())) # save changed sbml files and the accompaniying sedml file libsbml.writeSBMLToFile(sbml_model.getSBMLDocument(), temp_file_name) return temp_file_name, sbml_model_name, output_table
def petab_problem(minimal_sbml_model): # pylint: disable=W0621 """Test petab problem.""" # create test model document, model = minimal_sbml_model p = model.createParameter() p.setId('fixedParameter1') p.setName('FixedParameter1') p = model.createParameter() p.setId('observable_1') p.setName('Observable 1') measurement_df = pd.DataFrame(data={ OBSERVABLE_ID: ['obs1', 'obs2'], OBSERVABLE_PARAMETERS: ['', 'p1;p2'], NOISE_PARAMETERS: ['p3;p4', 'p5'] }) condition_df = pd.DataFrame(data={ CONDITION_ID: ['condition1', 'condition2'], CONDITION_NAME: ['', 'Condition 2'], 'fixedParameter1': [1.0, 2.0] }).set_index(CONDITION_ID) parameter_df = pd.DataFrame(data={ PARAMETER_ID: ['dynamicParameter1', 'dynamicParameter2'], PARAMETER_NAME: ['', '...'], }).set_index(PARAMETER_ID) observable_df = pd.DataFrame(data={ OBSERVABLE_ID: ['observable_1'], OBSERVABLE_NAME: ['julius'], OBSERVABLE_FORMULA: ['observable_1'], NOISE_FORMULA: [1], }).set_index(OBSERVABLE_ID) with tempfile.TemporaryDirectory() as temp_dir: sbml_file_name = Path(temp_dir, "model.xml") libsbml.writeSBMLToFile(document, str(sbml_file_name)) measurement_file_name = Path(temp_dir, "measurements.tsv") petab.write_measurement_df(measurement_df, measurement_file_name) condition_file_name = Path(temp_dir, "conditions.tsv") petab.write_condition_df(condition_df, condition_file_name) parameter_file_name = Path(temp_dir, "parameters.tsv") petab.write_parameter_df(parameter_df, parameter_file_name) observable_file_name = Path(temp_dir, "observables.tsv") petab.write_observable_df(observable_df, observable_file_name) yield petab.Problem.from_files( sbml_file=sbml_file_name, measurement_file=measurement_file_name, condition_file=condition_file_name, parameter_file=parameter_file_name, observable_files=observable_file_name)
def _process_models(self): """ Process and prepare models for simulation. Resolves the replacements and model couplings between the different frameworks and creates models which can be simulated with the different frameworks. An important step is finding the fba rules in the top model. :return: :rtype: """ logging.debug('* _process_models') ########################### # FBA rules ########################### # process FBA assignment rules of the top model self.flux_rules = DFBAModel._process_flux_rules(self.model_top) ########################### # ODE model ########################### # the roadrunner ode file is the flattened comp file. # FBA parts do not change any of the kinetic subparts (only connections via replaced bounds # and fluxes). # Consequently, the ODE part can be solved as is, only the iterative update between ode and fba has # to be performed # remove FBA assignment rules from the model, so they can be set via the simulator # not allowed to set assignment rules directly in roadrunner for variable in self.flux_rules.values(): self.model_top.removeRuleByVariable(variable) mixed_sbml_cleaned = tempfile.NamedTemporaryFile("w", suffix=".xml") libsbml.writeSBMLToFile(self.doc_top, mixed_sbml_cleaned.name) self.rr_comp = roadrunner.RoadRunner(mixed_sbml_cleaned.name) ########################### # prepare FBA models ########################### # FBA models are found based on the FBA modeling framework mdoc = self.doc_top.getPlugin("comp") for submodel in self.submodels[builder.MODEL_FRAMEWORK_FBA]: mref = submodel.getModelRef() emd = mdoc.getExternalModelDefinition(mref) source = emd.getSource() # check if relative path if not os.path.exists(source): s2 = os.path.join(self.sbml_dir, source) if not os.path.exists(s2): warnings.warn('FBA source cannot be resolved:' + source) else: source = s2 # Create FBA model and process fba_model = FBAModel(submodel=submodel, source=source, model_top=self.model_top) self.fba_models.append(fba_model)
def rxn_kegg2sbml(): kict = get_kegg_data() sbml = libsbml.SBMLReader().readSBML("Gthg_2.2 (manual).xml") print('The model has ', sbml.getNumErrors(), ' errors') model = sbml.getModel() hypercount = defaultdict(Counter) for rxn in model.getListOfReactions(): if rxn.getName() in kict: #Also should add: to RDF #http://www.kegg.jp/entry/R01090 details = kict[rxn.getName()] for f in sorted(details): if f != 'ENTRY': #How do I encode <=>? #Unicode? HTML ascii? #https://en.wikipedia.org/wiki/Arrow_(symbol) #encode('ascii','xmlcharrefreplace')) rxn.appendNotes( '<html:p>' + f + ': ' + '; '.join(details[f]).replace("<=>", "\u21cc").replace( '=>', '\u2192').replace('<=', '\u2190') + '</html:p>') #if 'DEFINITION' in details and 'EQUATION' in details: if 'EQUATION' in details: #Match the species. #prods=[model.getElementBySId(rxn.getProduct(n).getSpecies()) for n in range(rxn.getNumProducts())] #reacs=[model.getElementBySId(rxn.getReactant(n).getSpecies()) for n in range(rxn.getNumReactants())] #Split the def and eq lines and zip then #terms=list(zip([[re.search("([CG]\d+)",s2).group(0) for s2 in re.sub('\(.*?\)','',s).split('+')] for s in re.split('<?=>?',list(details['EQUATION'])[0])],[s.split('+') for s in re.split('<?=>?',list(details['DEFINITION'])[0])])) for k in [ re.search("([CG]\d+)", s2).group(0) for s in re.split('<?=>?', list(details['EQUATION'])[0]) for s2 in re.sub('\(.*?\)', '', s).split('+') ]: hypercount[k].update(['Σ'] + [ rxn.getProduct(n).getSpecies() for n in range(rxn.getNumProducts()) ] + [ rxn.getReactant(n).getSpecies() for n in range(rxn.getNumReactants()) ]) else: print('Unknown ', rxn.getName()) #print(hypercount) mapID = hyperwinner(hypercount) print('data:', mapID) for sp in model.getListOfSpecies(): tag = sp.getId().replace('_e', '_c') if tag in mapID: if type(mapID[tag]) is str: sp.appendNotes('<html:p>KEGG: ' + mapID[tag] + '</html:p>') #else: # sp.appendNotes('<html:p>KEGG: either '+' or '.join(mapID[tag])+'</html:p>') else: sp.appendNotes('<html:p>KEGG: !!!!GOGGLE IT</html:p>') print(tag, ' not an id element') libsbml.writeSBMLToFile(sbml, "test enrich.xml")
def write_ids_to_names(input_path: Path, output_path: Path) -> None:
    """Write SBML ids as names."""
    doc: libsbml.SBMLDocument = libsbml.readSBMLFromFile(str(input_path))
    elements = doc.getListOfAllElements()
    for element in elements:
        if element.isSetId():
            element.setName(element.id)
    libsbml.writeSBMLToFile(doc, str(output_path))
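# Hedged usage sketch for write_ids_to_names above; the paths are hypothetical
# placeholders.
from pathlib import Path

write_ids_to_names(Path("model.xml"), Path("model_with_names.xml"))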
def save(self, filename=None, set_filename=True):
    if filename is None:
        if self.filename is None:
            raise ValueError
        else:
            filename = self.filename
    libsbml.writeSBMLToFile(self.document, filename)
    if set_filename:
        self.filename = filename
def merge_models(model_paths, out_dir=None, merged_id="merged", validate=True): """ Merge models in model path. All models must be in the same subfolder. Relative paths are set in the merged models. Output directory must exist. :param model_paths: absolute paths to models :return: """ # necessary to convert models to SBML L3V1 # FIXME: the path should not be changed by functions (this will create problems if run concurrently) cur_dir = os.getcwd() os.chdir(out_dir) base_dir = None for model_id, path in model_paths.items(): if not os.path.exists(path): logging.error('Path for SBML file does not exist: {}'.format(path)) # get base dir of all model files from first file if base_dir is None: base_dir = os.path.dirname(path) else: new_dir = os.path.dirname(path) if new_dir != base_dir: raise ValueError('All SBML files for merging must be in same ' 'directory: {} != {}'.format( new_dir, base_dir)) # convert to L3V1 path_L3 = os.path.join(out_dir, "{}_L3.xml".format(model_id)) doc = libsbml.readSBMLFromFile(path) if doc.getLevel() < SBML_LEVEL: doc.setLevelAndVersion(SBML_LEVEL, SBML_VERSION) libsbml.writeSBMLToFile(doc, path_L3) model_paths[model_id] = path_L3 if validate is True: for path in model_paths: validation.check_sbml(path, name=path) # create comp model merged_doc = create_merged_doc(model_paths, merged_id=merged_id) if validate is True: validation.check_sbml(path, name=path) # write merged doc f_out = os.path.join(out_dir, '{}.xml'.format(merged_id)) libsbml.writeSBMLToFile(merged_doc, f_out) os.chdir(cur_dir) return merged_doc
def write_ids_to_names(input_path, output_path): """ :return: """ doc = libsbml.readSBMLFromFile(input_path) # type: libsbml.SBMLDocument elements = doc.getListOfAllElements() for element in elements: if element.isSetId(): element.setName(element.id) print(element) libsbml.writeSBMLToFile(doc, output_path)
def write_sbml_to_file(self, sbml_out):
    """Write the SBML file.

    First creates the clean SBML document, then writes it to the given path.

    :param sbml_out: path of the SBML file to write
    :type sbml_out: str
    """
    self._create_sbml()
    libsbml.writeSBMLToFile(self.doc, sbml_out)
def addInChiKey(self, input_sbml, output_sbml): """Check the MIRIAM annotation for MetaNetX or CHEBI id's and try to recover the inchikey from cache and add it to MIRIAM :param input_sbml: SBML file input :param output_sbml: Output SBML file :type input_sbml: str :type output_sbml: str :rtype: bool :return: Success or failure of the function """ filename = input_sbml.split('/')[-1].replace('.rpsbml', '').replace( '.sbml', '').replace('.xml', '') self.logger.debug(filename) rpsbml = rpSBML(inFile=input_sbml) for spe in rpsbml.getModel().getListOfSpecies(): inchikey = None miriam_dict = rpsbml.readMIRIAMAnnotation(spe.getAnnotation()) if 'inchikey' in miriam_dict: self.logger.info('The species ' + str(spe.id) + ' already has an inchikey... skipping') continue try: for mnx in miriam_dict['metanetx']: inchikey = self.cid_strc[self.rpcache._checkCIDdeprecated( mnx, self.deprecatedCID_cid)]['inchikey'] if inchikey: rpsbml.addUpdateMIRIAM(spe, 'species', {'inchikey': [inchikey]}) else: self.logger.warning('The inchikey is empty for: ' + str(spe.id)) continue except KeyError: try: for chebi in miriam_dict['chebi']: inchikey = self.cid_strc[ self.rpcache._checkCIDdeprecated( self.chebi_cid[chebi], self.deprecatedCID_cid)]['inchikey'] if inchikey: rpsbml.addUpdateMIRIAM(spe, 'species', {'inchikey': [inchikey]}) else: self.logger.warning('The inchikey is empty for: ' + str(spe.id)) continue except KeyError: self.logger.warning('Cannot find the inchikey for: ' + str(spe.id)) writeSBMLToFile(rpsbml.document, output_sbml) return True
def main (args): """Usage: setIdFromNames filename output """ if len(args) != 3: print(main.__doc__) sys.exit(1) filename = args[1]; output = args[2]; # read the document start = time.time() * 1000; document = libsbml.readSBMLFromFile(filename); stop = time.time() * 1000; print "" print " filename: {0}".format( filename); print " read time (ms): {0}".format( stop - start); # stop in case of serious errors errors = document.getNumErrors(libsbml.LIBSBML_SEV_ERROR); if (errors > 0): print " error(s): {0}".format(errors); document.printErrors(); sys.exit (errors); # get a list of all elements, as we will need to know all identifiers # so that we don't create duplicates. allElements = document.getListOfAllElements(); # get a list of all ids allIds = getAllIds(allElements); # create the transformer with the ids trans = SetIdFromNames(allIds); # rename the identifiers (using the elements we already gathered before) start = time.time() * 1000; document.getModel().renameIDs(allElements, trans); stop = time.time() * 1000; print " rename time (ms): {0}".format(stop - start); # write to file start = time.time() * 1000; libsbml.writeSBMLToFile(document, output); stop = time.time() * 1000; print " write time (ms): {0}".format(stop - start); print "";
def main(rdir, eqnfn, molfn, taxon, modelid, modelname, species, outfn): mol2name = {} f = open(molfn) for s in f: molid, name, name2 = s.strip().split("\t") mol2name[molid] = name print "Loading reconstruction: %s/%s" % (rdir, common.NETWORK_REACTION_FILE) f = open("%s/%s" % (rdir, common.NETWORK_REACTION_FILE)) bf = open(eqnfn) reco = common.read_reconstruction(f) eqns = read_balanced_reactions(bf) print "%d reactions" % (len(reco)) sbml = convert_to_SBML(reco, eqns, mol2name, taxon, modelid, modelname, species) libsbml.writeSBMLToFile(sbml, outfn)
def create_sbml_file(file_name):
    """Simple function that creates a new SBML model 'model1' with a non-constant parameter x."""
    doc = libsbml.SBMLDocument()
    model = doc.createModel()
    model.setId('model1')
    x = model.createParameter()
    x.setId('x')
    x.setValue(0)
    x.setConstant(False)
    libsbml.writeSBMLToFile(doc, file_name)
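# Hedged usage sketch for create_sbml_file above: write the tiny model to a
# temporary directory and read it back; nothing persists after the block.
import os
import tempfile
import libsbml

with tempfile.TemporaryDirectory() as tmp_dir:
    sbml_path = os.path.join(tmp_dir, "model1.xml")
    create_sbml_file(sbml_path)
    doc_in = libsbml.readSBMLFromFile(sbml_path)
    assert doc_in.getModel().getParameter("x") is not None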
def sbml_addkeggchem():
    kict = get_kegg_data()
    sbml = libsbml.SBMLReader().readSBML("Gthg_2.3.xml")
    error_check(sbml)
    model = sbml.getModel()
    for sp in model.getListOfSpecies():
        notes = sp.getNotesString()
        regexmatch = re.search('KEGG:\s+(\w+)', notes)
        if regexmatch:
            cid = regexmatch.group(1)
            check(sp.setNotes(notify(cid)), 'species notes')
        else:
            print('KEGG-less:', notes)
    libsbml.writeSBMLToFile(sbml, "test enrich.xml")
def process_file(sbml_file): reader = libsbml.SBMLReader() doc = reader.readSBML(sbml_file) model = doc.getModel() if not model: return NOT_MODEL, None model_id = model.getId() if not model_id: sbml_name = os.path.splitext(os.path.basename(sbml_file))[0] model.setId(sbml_name) model_id = sbml_name m_id = check_md5(sbml_file) directory = os.path.join('..', 'html', m_id) if os.path.exists(directory): if os.path.exists(os.path.join(directory, 'index.html')): return ALREADY_EXISTS, (model_id, m_id) else: os.makedirs(directory) copytree(LIB, os.path.join(directory, 'lib')) log_file = None try: log_file = os.path.join(directory, 'log.log') with open(log_file, "w+"): pass except: pass logging.basicConfig(level=logging.INFO, format='%(message)s', filename=log_file) if check_for_groups(sbml_file, SBO_CHEMICAL_MACROMOLECULE, GROUP_TYPE_UBIQUITOUS): new_sbml_file = os.path.join(directory, '%s_with_groups.xml' % model_id) if sbml_file != new_sbml_file: if not libsbml.writeSBMLToFile(doc, new_sbml_file): return NOT_MODEL, None os.remove(sbml_file) return ALREADY_GENERALIZED, (model_id, model.getName(), m_id) new_sbml_file = os.path.join(directory, '%s.xml' % model_id) if sbml_file != new_sbml_file: if not libsbml.writeSBMLToFile(doc, new_sbml_file): return NOT_MODEL, None os.remove(sbml_file) return OK, (model_id, model.getName(), m_id)
def writeSBMLToFile(self, fileName):
    """
    Generate SBML from this ReactionNetwork and write it to the given file.
    """
    doc = self.getSBMLDocument()
    status = libsbml.writeSBMLToFile(doc, fileName)
    return status
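# Hedged note on the returned status: libsbml.writeSBMLToFile reports success
# through a nonzero return value, so the wrapper's result can be checked by the
# caller. A minimal stand-alone illustration (the helper name is hypothetical):
import libsbml

def _checked_write(doc, file_name):
    status = libsbml.writeSBMLToFile(doc, file_name)
    if not status:
        raise IOError("failed to write SBML to {}".format(file_name))
    return status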
def test_crn_from_sbml(self):
    filename = path.join(input_sbml, "BIOMD0000000001.xml")
    output = path.join(input_sbml, "out_BIOMD0000000001.xml")
    crn = from_sbml(filename)
    success = libsbml.writeSBMLToFile(crn.document, output)
    self.assertTrue(success)
def main(rdir, eqnfn, molfn, taxon, modelid, modelname, species, outfn, sbmlversion, boundsfile, rxnnamefile=None, pathwayfile=None): print("Loading molecule names... ") # dictionary where keys are mol ids (C00001) and # items are names from second column of kegg-compounds file mol2name = common.parse_molecule_names(molfn) print "Loading reconstruction: %s/%s" % (rdir, common.NETWORK_REACTION_FILE) f = open("%s/%s" % (rdir, common.NETWORK_REACTION_FILE)) bf = open(eqnfn) reco = common.read_reconstruction(f) eqns = read_balanced_reactions(bf) if pathwayfile is not None: fp = open(pathwayfile) pathways={}; pathwayasnames={}; for s in fp: #print s sisalto = s.strip().split("\t") #print len(sisalto) if len(sisalto)>1: pathways[sisalto[0]]=sisalto[1] if len(sisalto)>2: pathwayasnames[sisalto[0]]=sisalto[2] if len(pathwayasnames) == 0: pathwayasnames = None else: pathways = None if rxnnamefile is not None: fp = open(rxnnamefile) rxnnames={}; for s in fp: sisalto = s.strip().split("\t") if len(sisalto)>1: rxnnames[sisalto[0]]=sisalto[1] #print ("rxnnames[%s] = %s" % (sisalto[0],sisalto[1])) else: rxnnames = None bounds={}; if boundsfile is not None: fp = open(boundsfile) for s in fp: sisalto = s.strip().split("\t") if len(sisalto)>1: bounds[sisalto[0].strip()]=sisalto[3] +","+ sisalto[4] # print "%d reactions" % (len(reco)) sbml = convert_to_SBML(reco, eqns, mol2name, taxon, modelid, modelname, species, sbmlversion, bounds, rxnnames, pathways, pathwayasnames) libsbml.writeSBMLToFile(sbml, outfn)
def test_mass_balance():
    doc = libsbml.readSBMLFromFile(data.DEMO_SBML)
    # add defaults
    fbc.add_default_flux_bounds(doc)

    import tempfile
    f = tempfile.NamedTemporaryFile('w', suffix='xml')
    libsbml.writeSBMLToFile(doc, f.name)
    f.flush()
    model = cobra.io.read_sbml_model(f.name)

    # mass/charge balance
    for r in model.reactions:
        mb = r.check_mass_balance()
        # all metabolites are balanced
        assert len(mb) == 0
def annotate_sbml_file(f_sbml, f_annotations, f_sbml_annotated):
    """ Annotate a given SBML file with the provided annotations.

    :param f_sbml: SBML file to annotate
    :param f_annotations: csv file with annotations
    :param f_sbml_annotated: annotated output file
    """
    # read SBML model
    doc = libsbml.readSBML(f_sbml)
    # read annotations
    annotations = ModelAnnotator.annotations_from_file(f_annotations)
    doc = annotate_sbml_doc(doc, annotations)
    # Save
    libsbml.writeSBMLToFile(doc, f_sbml_annotated)
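# Hedged usage sketch for annotate_sbml_file above; the three paths
# (input model, csv annotations, annotated output) are hypothetical.
annotate_sbml_file("model.xml", "annotations.csv", "model_annotated.xml")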
def sbml_body(): __doc__ = 'SBML2Latex does not like the notes' sbml = libsbml.SBMLReader().readSBML("Gthg_2.4.xml") error_check(sbml) model = sbml.getModel() for sp in model.getListOfSpecies(): notes = '<body xmlns="http://www.w3.org/1999/xhtml">\n' + sp.getNotesString( ) + '\n</body>' sp.setNotes( notes.replace('<notes>\n', '').replace('</notes>\n', '').replace('html:', '')) for rxn in model.getListOfReactions(): notes = '<body xmlns="http://www.w3.org/1999/xhtml">\n' + rxn.getNotesString( ) + '\n</body>' rxn.setNotes( notes.replace('<notes>\n', '').replace('</notes>\n', '').replace('html:', '')) libsbml.writeSBMLToFile(sbml, "Gthg_2.4_html_tweak.xml")
def add_exchange_reactions(self, model_path): DEFAULT_COMPARTMENT = "cytosol" reader = libsbml.SBMLReader() document = reader.readSBML(model_path) model = document.getModel() print model.getLevel() for reaction in self.reactions: ## if model contains reactants and products if self.contains_specie(reaction, model): r = model.createReaction() r.setId(reaction.get_id()) r.setMetaId("meta_%s" % (reaction.get_id())) r.setName(reaction.get_name()) if float(reaction.get_lb()) < 0 and float(reaction.get_ub()) > 0: r.setReversible(True) else: r.setReversible(False) r.setSBOTerm("SBO:0000176") r.setFast(False) r.setCompartment(DEFAULT_COMPARTMENT) law = r.createKineticLaw() if model.getLevel()==2: lbParameter = law.createParameter() ubParameter = law.createParameter() else: lbParameter = law.createLocalParameter() ubParameter = law.createLocalParameter() lbParameter.setId("LOWER_BOUND") lbParameter.setValue(float(reaction.get_lb())) ubParameter.setId("UPPER_BOUND") ubParameter.setValue(float(reaction.get_ub())) for reactant in reaction.get_reactants(): re = r.createReactant() re.setSpecies(reactant.get_id()) re.setStoichiometry(float(reactant.get_stoichiometry())) re.setConstant(True) for product in reaction.get_products(): pr = r.createProduct() pr.setSpecies(product.get_id()) pr.setStoichiometry(float(product.get_stoichiometry())) pr.setConstant(True) libsbml.writeSBMLToFile(document, model_path)
def __init__(self, modelPath, reactionPath, biomassPath, boundsPath, exchange, outputPath): self.reactions = [] self.model ='' if(modelPath): #read model reader = libsbml.SBMLReader() document = reader.readSBML(modelPath) self.model = document.getModel() newDocument = document.clone(); if(reactionPath): self.parseReactionBag(reactionPath) if self.model == None: return listOfReactions = self.model.getListOfReactions() for reaction in self.reactions: if not reaction.isInModel(listOfReactions): print "adding Reaction:" + reaction.getId() else: print "the reaction " + reaction.getId() + " is already on the model. OverWriting..." self.model.getReaction(reaction.getOverReaction()).removeFromParentAndDelete() self.addReactionInToModel(reaction, self.model) if(biomassPath): print "adding biomass Reaction" reaction = self.parseBiomass(biomassPath) if not reaction.isInModel(self.model.getListOfReactions()): self.addReactionInToModel(reaction, self.model) if(boundsPath): print "adding new bounds" self.parseBounds(boundsPath, self.model) if(exchange): print "adding exchage reactions" self.addExchangeReactions(self.model) if(outputPath): print "Writing file" newDocument.setModel(self.model) libsbml.writeSBMLToFile(newDocument, outputPath)
def test_crn_from_react_file(self):
    reactions = parse_reaction_file(path.join(input_reactions, "allosteric_activation"))
    filename = path.join(input_sbml, "allosteric_activation.xml")
    model, document, _ = model_from_reacts(reactions)
    success = libsbml.writeSBMLToFile(document, filename)
    self.assertTrue(success)
    crn = from_sbml(filename)
    crn.inspect()
def add_timecourse_as_events(
    petab_problem: petab.Problem,
    timecourse_id: str = None,
    output_path: Optional[TYPE_PATH] = None,
):
    if timecourse_id is None:
        try:
            timecourse_id = one(petab_problem.timecourse_df.index)
        except ValueError:
            raise ValueError(
                'A timecourse ID must be specified if there are multiple '
                'timecourses in the PEtab problem timecourse table.')
    sbml_model = petab_problem.sbml_document.getModel()
    timecourse = parse_timecourse_string(
        petab_problem.timecourse_df.loc[timecourse_id][TIMECOURSE],
    )
    for time, condition_id in timecourse:
        add_event(
            sbml_model=sbml_model,
            event_id=get_slug(time),
            trigger_formula=f'time >= {time}',
            event_assignments=Condition(
                petab_problem.condition_df.loc[condition_id],
            ),
        )
    if output_path is not None:
        # write the document that owns sbml_model
        libsbml.writeSBMLToFile(petab_problem.sbml_document, str(output_path))
    return sbml_model
def prepare_rbc_model(model_path, name, target_dir): """ Add ports to the RBC model. :param model_path: :param name: :param target_dir: :return: """ doc = libsbml.readSBMLFromFile(model_path) # type: libsbml.SBMLDocument # add comp package # sbmlns = libsbml.SBMLNamespaces(doc.getLevel(), doc.getVersion()) # sbmlns.addPackageNamespace("comp", 1) doc.enablePackage("http://www.sbml.org/sbml/level3/version1/comp/version1", "comp", True) # doc.setNamespaces(sbmlns) doc.setPackageRequired("comp", True) model = doc.getModel() model.setId(name) print(model) cmodel = model.getPlugin("comp") # type: libsbml.CompModelPlugin def create_port(sid): """" Creates port for given SBase ID.""" p = cmodel.createPort() # type: libsbml.Port port_sid = f'{sid}{PORT_SUFFIX}' p.setId(port_sid) p.setName(port_sid) p.setMetaId(port_sid) p.setSBOTerm(599) # port p.setIdRef(sid) return p # add ports cmodel = model.getPlugin("comp") # type: libsbml.CompModelPlugin for sid in ['Vplasma', 'glcEXT', 'lacEXT', 'phosEXT', 'pyrEXT']: create_port(sid) output_path = os.path.join(target_dir, "{}.xml".format(name)) libsbml.writeSBMLToFile(doc, output_path) return output_path
def _create_sbml(self):
    """ Create the SBMLDocument. """
    self._init_sbml_model()
    self.interpolators = Interpolation.create_interpolators(self.data, self.method)
    for interpolator in self.interpolators:
        Interpolation.add_interpolator_to_model(interpolator, self.model)

    # validation of SBML document
    try:
        temp_dir = tempfile.mkdtemp()
        tmp_f = os.path.join(temp_dir, 'validated.xml')
        libsbml.writeSBMLToFile(self.doc, tmp_f)
        validation.check_sbml(tmp_f, ucheck=False)
    finally:
        shutil.rmtree(temp_dir)
def add_uncertainty_example(tmp: bool = False) -> None: """Add uncertainty to a model.""" output_dir = str(Path(__file__).parent) doc = libsbml.readSBMLFromFile(os.path.join( output_dir, "e_coli_core.xml")) # type: libsbml.SBMLDocument # activate distrib doc.enablePackage( "http://www.sbml.org/sbml/level3/version1/distrib/version1", "distrib", True) doc.setPackageRequired("distrib", True) model = doc.getModel() # type: libsbml.Model model_fbc = model.getPlugin("fbc") # type: libsbml.FbcModelPlugin # -------------------------------- # [2] write gene expression data # -------------------------------- gp = model_fbc.getGeneProduct(0) print(gp) gp_distrib = gp.getPlugin("distrib") # type: libsbml.DistribSBasePlugin print(gp_distrib) if gp_distrib: uncertainty = gp_distrib.createUncertainty( ) # type: libsbml.Uncertainty up_mean = uncertainty.createUncertParameter( ) # type: libsbml.UncertParameter up_mean.setType(libsbml.DISTRIB_UNCERTTYPE_MEAN) up_mean.setValue(2.5) else: logging.error("DistribSBasePlugin not working for fbc:GeneProduct.") # store model with gene expression data if tmp: with tempfile.NamedTemporaryFile(suffix=".xml") as f_sbml: libsbml.writeSBMLToFile(doc, f_sbml.name) else: libsbml.writeSBMLToFile( doc, os.path.join(output_dir, "e_coli_core_expression.xml"))
def sbml_rdf_fixer():
    __doc__ = 'Too hard to work out. Abandoned.'
    sbml = libsbml.SBMLReader().readSBML("Gthg_2.4.xml")
    error_check(sbml)
    model = sbml.getModel()
    for sp in model.getListOfSpecies():
        if not sp.getCVTerms():
            print(sp.addCVTerm(libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER), True))
        bag = sp.getCVTerm(0)
        notes = sp.getNotesString()
        regexmatch = re.search('CHEBI:\s+(.*?)[<;]', notes)
        if regexmatch:
            name = regexmatch.group(1)
            bag.addResource("urn:miriam:chebi:CHEBI%3A" + name)
        regexmatch = re.search('KEGG:\s+(.*?)[<;]', notes)
        if regexmatch:
            name = regexmatch.group(1)
            bag.addResource("urn:miriam:kegg.compound:" + name)
    libsbml.writeSBMLToFile(sbml, "test enrich.xml")
def test_biomodel_merge(): """ Test model merging. Using the pytest tmpdir fixture :param tmpdir: :return: """ manipulation_dir = os.path.join(data_dir, 'manipulation') # dictionary of ids & paths of models which should be combined # here we just bring together the first Biomodels model_ids = ["BIOMD000000000{}".format(k) for k in range(1, 5)] model_paths = dict(zip(model_ids, [os.path.join(manipulation_dir, "{}.xml".format(mid)) for mid in model_ids]) ) print(model_paths) # merge model out_dir = os.path.join(manipulation_dir, 'output') if not os.path.exists(out_dir): os.mkdir(out_dir) print('out_dir:', out_dir) doc = manipulation.merge_models(model_paths, out_dir=out_dir, validate=False) assert doc is not None Nall, Nerr, Nwarn = validation.check_doc(doc, ucheck=False) assert Nerr == 0 assert Nwarn == 0 assert Nall == 0 # flatten the model doc_flat = comp.flattenSBMLDocument(doc) assert doc_flat is not None libsbml.writeSBMLToFile(doc_flat, os.path.join(out_dir, "merged_flat.xml")) Nall, Nerr, Nwarn = validation.check_doc(doc_flat, ucheck=False) assert Nerr == 0 assert Nwarn in [0, 74] assert Nall in [0, 74]
def create_sbml_report(sbml_path, out_dir, template='report.html', promote=False, validate=True): """ Creates the SBML report in the out_dir :param validate: :param promote: :param template: :param sbml_path: :param doc: :param out_dir: :return: :rtype: """ # check if sbml_file exists if not os.path.exists(sbml_path): warnings.warn('SBML file does not exist: {}'.format(sbml_path)) # check sbml file if validate: check_sbml(sbml_path) # read sbml doc = libsbml.readSBML(sbml_path) if promote: promote_local_variables(doc) # write sbml to output folder basename = os.path.basename(sbml_path) tokens = basename.split('.') name = '.'.join(tokens[:-1]) f_sbml = os.path.join(out_dir, basename) libsbml.writeSBMLToFile(doc, f_sbml) # write html (unicode) html = _create_html(doc, basename, html_template=template) f_html = codecs.open(os.path.join(out_dir, '{}.html'.format(name)), encoding='utf-8', mode='w') f_html.write(html) f_html.close()
def example(path):
    """ Example FBA with demo model.

    :param path: path to the SBML file
    """
    doc = libsbml.readSBMLFromFile(path)

    # add defaults
    fbc.add_default_flux_bounds(doc)

    import tempfile
    f = tempfile.NamedTemporaryFile('w', suffix='xml')
    libsbml.writeSBMLToFile(doc, f.name)
    f.flush()
    model = cobra.io.read_sbml_model(f.name)

    # mass/charge balance
    for r in model.reactions:
        mb = r.check_mass_balance()
        print(r.id, mb)
parser.add_argument('--metabolite-id', dest="metabolite_id", default="name", action='store', help="Strategy to generate unique metabolite id. Specify 'name' to use metabolite name as SBML id or 'auto' to use auto-incrementing id M_XXXX. (default: name)") parser.add_argument('--compartment-id', dest="compartment_id", default="name", action='store', help="Strategy to generate unique compartment id. Specify 'name' to use compartment name as SBML id or 'auto' to use auto-incrementing id C_XXXX. (default: name)") parser.add_argument('--metabolite-map', dest="metabolite_map", action='store', help="Map metabolite identifiers to names") args = parser.parse_args() # Read bioopt model parser = BiooptParser(inf=args.in_inf) model = parser.parse_file(args.bioopt) metabolite_map = {} if args.metabolite_map: for i, line in enumerate(open(args.metabolite_map)): l = re.split("\t", line.rstrip()) if len(l) >= 2: metabolite_map[l[0]] = l[1] else: raise RuntimeError("Error on line {} in '{}' file. Map file should have more than 2 columns".format(i, args.metabolite_map)) converter = Bioopt2SbmlConverter(level=args.level, version=args.version, compartment_pattern=args.c_pattern, inf=args.out_inf, metabolite_map=metabolite_map, reaction_id=args.reaction_id, metabolite_id=args.metabolite_id, compartment_id=args.compartment_id) sbml = converter.convert(model) libsbml.writeSBMLToFile(sbml, args.sbml) print "Finished converting {0} into SBML ({1})".format(args.bioopt, args.sbml)
# setting because it is required (if unlucky additional info required) # but we can't set it because we can't access the FBCModelPlugins of the ModelDefinitions if fbc_model is not None: if not fbc_model.isSetStrict(): fbc_model.setStrict(False) else: print("WARNING: This should never happen. All ModelDefinitions should have a FBCModelPlugin") doc.checkInternalConsistency() doc.printErrors() return doc if __name__ == "__main__": import libsbml from os.path import join as pjoin directory = './emd_files/' top_file = pjoin(directory, 'diauxic_top.xml') # replace the ExternalModelDefinitions with ModelDefinitions doc_top = libsbml.readSBMLFromFile(top_file) doc_no_emd = flattenExternalModelDefinitions(doc_top) # write to file libsbml.writeSBMLToFile(doc_no_emd, pjoin(directory, 'test_emd_flat.xml')) print(libsbml.__version__)
def save_as_sbml(input_model, out_sbml):
    logging.info("saving to {0}".format(out_sbml))
    out_doc = libsbml.SBMLDocument(input_model.getSBMLNamespaces())
    out_doc.setModel(input_model)
    libsbml.writeSBMLToFile(out_doc, out_sbml)
def simple_merge_models(S, model_id2c_id2group, model_id2dfs, out_sbml): doc = libsbml.SBMLDocument(2, 4) model = doc.createModel() model.setId('merged_model') model_id2id2id = defaultdict(dict) common_ids = set() c_group2id = {} new_m_id2i, new_r_id2i, new_efm_id2i, new_boundary_m_ids = {}, {}, {}, [] for model_id, [_, _, df] in model_id2dfs.items(): for index, row in df.iterrows(): c_id, name = row['Id'], row['Name'] if model_id in model_id2c_id2group and c_id in model_id2c_id2group[model_id]: group = model_id2c_id2group[model_id][c_id] if group in c_group2id: new_id = c_group2id[group] else: new_id = create_compartment(model, name=name, id_='merged_%s_%s' % (model_id, c_id)).getId() c_group2id[group] = new_id common_ids.add(new_id) else: new_id = create_compartment(model, name=name, id_='%s_%s' % (model_id, c_id)).getId() model_id2id2id[model_id][c_id] = new_id id2id = {} m_id_group_ids = set(S.m_id2gr_id.values()) for (model_id, m_id), i in ((it, i) for (it, i) in S.m_id2i.items() if it not in m_id_group_ids): c_id = model_id2dfs[model_id][0].at[m_id, 'Compartment'] c_id = model_id2id2id[model_id][c_id] name = model_id2dfs[model_id][0].at[m_id, 'Name'] is_boundary = (model_id, m_id) in S.boundary_m_ids new_id = create_species(model, compartment_id=c_id, name=name, bound=is_boundary, id_='%s_%s' % (model_id, m_id)).getId() model_id2id2id[model_id][m_id] = new_id id2id[(model_id, m_id)] = new_id new_m_id2i[new_id] = i if is_boundary: new_boundary_m_ids.append(new_id) for it in m_id_group_ids: model_id, m_ids = next(iter(it)) m_id = next(iter(m_ids)) is_boundary = it in S.boundary_m_ids new_id = \ create_species(model, compartment_id=model_id2id2id[model_id][model_id2dfs[model_id][0].at[m_id, 'Compartment']], name=model_id2dfs[model_id][0].at[m_id, 'Name'], bound=is_boundary, id_='merged_%s_%s' % (model_id, m_id)).getId() for model_id, m_ids in it: model_id2id2id[model_id].update({m_id: new_id for m_id in m_ids}) id2id[it] = new_id new_m_id2i[new_id] = S.m_id2i[it] if is_boundary: new_boundary_m_ids.append(new_id) common_ids.add(new_id) for ((model_id, r_id), i) in ((it, i) for (it, i) in S.r_id2i.items() if it not in S.gr_id2r_id2c.keys()): r_id2st, p_id2st = S.st_matrix.get_inputs_outputs((model_id, r_id)) new_id = create_reaction(model, {id2id[m_id]: st for (m_id, st) in r_id2st.items()}, {id2id[m_id]: st for (m_id, st) in p_id2st.items()}, model_id2dfs[model_id][1].at[r_id, 'Name'], reversible=True, id_='%s_%s' % (model_id, r_id)).getId() model_id2id2id[model_id][r_id] = new_id new_r_id2i[new_id] = i for gr, it2c in S.gr_id2r_id2c.items(): model_id, r_id = next(iter(it2c.keys())) r_id2st, p_id2st = S.st_matrix.get_inputs_outputs(gr) new_id = \ create_reaction(model, {id2id[m_id]: st for (m_id, st) in r_id2st.items()}, {id2id[m_id]: st for (m_id, st) in p_id2st.items()}, model_id2dfs[model_id][1].at[r_id, 'Name'], reversible=True, id_='merged_%s_%s' % (model_id, r_id)).getId() for model_id, r_id in it2c.keys(): model_id2id2id[model_id][r_id] = new_id new_r_id2i[new_id] = S.r_id2i[gr] common_ids.add(new_id) for ((model_id, efm_id), i) in ((it, i) for (it, i) in S.efm_id2i.items() if it not in S.gr_id2efm_ids.keys()): new_id = '%s_%s' % (model_id, efm_id) new_efm_id2i[new_id] = i model_id2id2id[model_id][efm_id] = new_id for gr, efm_ids in S.gr_id2efm_ids.items(): model_id, efm_id = next(efm_ids) new_id = 'merged_%s_%s' % (model_id, efm_id) new_efm_id2i[new_id] = S.r_id2i[gr] for model_id, efm_id in efm_ids: model_id2id2id[model_id][efm_id] = new_id libsbml.writeSBMLToFile(doc, out_sbml) 
return model_id2id2id, common_ids, System(m_id2i=new_m_id2i, r_id2i=new_r_id2i, efm_id2i=new_efm_id2i, N=S.N, V=S.V, boundary_m_ids=new_boundary_m_ids)
# success probability of Geometric-1 up_mean_geo1 = up.createUncertParameter() # type: libsbml.UncertParameter up_mean_geo1.setType(libsbml.DISTRIB_UNCERTTYPE_EXTERNALPARAMETER) up_mean_geo1.setName("success probability of Geometric 1") up_mean_geo1.setValue(0.4) up_mean_geo1.setDefinitionURL("http://www.probonto.org/ontology#PROB_k0000789") return doc if __name__ == "__main__": functions = [ # distrib_normal, # distrib_all, uncertainty, ] for f_creator in functions: name = f_creator.__name__ print(name) # distrib_example1() doc = f_creator() sbml = libsbml.writeSBMLToString(doc) print("-" * 80) print(sbml) print("-" * 80) sbml_path = "./{}.xml".format(name) libsbml.writeSBMLToFile(doc, sbml_path) validation.check_doc(doc)
if not os.path.isfile( ANN_FILE_PATH): logging.getLogger( "st2sbml").error( "input {0} does not exist or is not a file".format( ANN_FILE_PATH)) exit(1) TRIGGER, ENTITY_TRIGGER, EVENTS = parse_standoff.parse_ann( ANN_FILE_PATH); else: A1_FILE_PATH = CMD_ARGS.path + CMD_ARGS.a1 A2_FILE_PATH = CMD_ARGS.path + CMD_ARGS.a2 logging.getLogger( "st2sbml").info( "Processing %s a1/a2", CMD_ARGS.path) if not os.path.isfile( A1_FILE_PATH): logging.getLogger( "st2sbml").error( "Input %s does not exist or is not a file", A1_FILE_PATH) exit(1) if not os.path.isfile( A2_FILE_PATH): logging.getLogger( "st2sbml").error( "Input %s does not exist or is not a file", A2_FILE_PATH) exit(1) # parse the a1/a2 files TRIGGER, ENTITY_TRIGGER, EVENTS = parse_standoff.parse_a1_a2( A1_FILE_PATH, A2_FILE_PATH); else: ANN_FILE_PATH = CMD_ARGS.file logging.getLogger( "st2sbml").info( "Processing %s", ANN_FILE_PATH) if not os.path.isfile( ANN_FILE_PATH): logging.getLogger( "st2sbml").error( "input {0} does not exist or is not a file".format( ANN_FILE_PATH)) exit(1) TRIGGER, ENTITY_TRIGGER, EVENTS = parse_standoff.parse_ann( ANN_FILE_PATH); logging.getLogger( "st2sbml").info( "Processing {}: loaded {} entity trigger and {} events".format( CMD_ARGS.path, len(ENTITY_TRIGGER), len( EVENTS.keys()))) DOCUMENT = create_document( trigger = TRIGGER, entity_trigger = ENTITY_TRIGGER, events = EVENTS, arguments = CMD_ARGS) ##### WRITE SBML FILE libsbml.writeSBMLToFile( DOCUMENT, CMD_ARGS.output)
gpr_dict[cols[1]] = cols[4] infile = '/Users/wbryant/Dropbox/UG Project - COGzymes/model data/BTH_iah991.xml' reader = SBMLReader() document = reader.readSBMLFromFile(infile) model = document.getModel() notes_string = ' <body xmlns="http://www.w3.org/1999/xhtml">\n <p>GENE_ASSOCIATION: %s</p>\n </body>' for reaction in model.getListOfReactions(): id_rxn = reaction.getId() id_rxn = re.sub("^R_","",id_rxn) id_rxn = re.sub("_LPAREN_","(",id_rxn) id_rxn = re.sub("_RPAREN_",")",id_rxn) id_rxn = re.sub("_DASH_","-",id_rxn) if id_rxn in gpr_dict: gpr = gpr_dict[id_rxn] else: print('%s not found ...' % id_rxn) gpr = '' gpr_string = notes_string % gpr reaction.setNotes(gpr_string) document.setModel(model) writeSBMLToFile(document,'BTH_with_gprs.xml')
import tesbml as libsbml

doc = libsbml.SBMLDocument(3, 2)
model = doc.createModel()
model.id = "test_inline_unit"

ud = model.createUnitDefinition()
ud.setId("m")
u = model.createUnit()
u.setKind(libsbml.UNIT_KIND_METRE)
u.setExponent(1.0)
u.setScale(1)
u.setMultiplier(1.0)
ud.addUnit(u)

p = model.createParameter()
p.id = "p"
p.constant = False
p.units = "m"

rule = model.createAssignmentRule()
rule.variable = "p"
ast = libsbml.parseL3FormulaWithModel("5.0 m", model)
rule.setMath(ast)
formula = libsbml.formulaToL3String(ast)
print(formula)

libsbml.writeSBMLToFile(doc, "/home/mkoenig/Desktop/inline_units_py.xml")
def flattenSBMLDocument(doc, leave_ports=True, output_path=None, suffix='_flat'): """ Flatten the given SBMLDocument. Validation should be performed before the flattening and is not part of the flattening routine. :param doc: SBMLDocument to flatten. :type doc: SBMLDocument :return: :rtype: SBMLDocument """ Nerrors = doc.getNumErrors() if Nerrors > 0: if doc.getError(0).getErrorId() == libsbml.XMLFileUnreadable: # Handle case of unreadable file here. logging.error("SBML error in doc: libsbml.XMLFileUnreadable") elif doc.getError(0).getErrorId() == libsbml.XMLFileOperationError: # Handle case of other file error here. logging.error("SBML error in doc: libsbml.XMLFileOperationError") else: # Handle other error cases here. logging.error("SBML errors in doc, see SBMLDocument error log.") # converter options props = libsbml.ConversionProperties() props.addOption("flatten comp", True) # Invokes CompFlatteningConverter props.addOption("leave_ports", leave_ports) # Indicates whether to leave ports # flatten current = time.clock() result = doc.convert(props) flattened_status = (result==libsbml.LIBSBML_OPERATION_SUCCESS) lines = [ '', '-' * 120, str(doc), "{:<25}: {}".format("flattened", str(flattened_status).upper()), "{:<25}: {:.3f}".format("flatten time (ms)", time.clock() - current), '-' * 120, ] info = "\n".join(lines) if flattened_status: logging.info(info) else: logging.error(info) raise ValueError("SBML could not be flattend due to errors in the SBMLDocument.") if suffix is not None: model = doc.getModel() if model is not None: model.setId(model.getId() + suffix) if model.isSetName(): model.setName(model.getName() + suffix) if output_path is not None: # Write the results to the output file. libsbml.writeSBMLToFile(doc, output_path) logging.info("Flattened model written to {}".format(output_path)) return doc
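# Hedged usage sketch for flattenSBMLDocument above; the input path is a
# hypothetical placeholder for a hierarchical (comp) SBML model.
doc = libsbml.readSBMLFromFile("composite_model.xml")
flat_doc = flattenSBMLDocument(doc, leave_ports=True,
                               output_path="composite_model_flat.xml")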
def simulate(mixed_sbml, tend=10.0, step_size=0.1, debug=False): """ Performs model simulation. The simulator figures out based on the SBO terms in the list of submodels, which simulation/modelling framework to use. The passing of information between FBA and SSA/ODE is based on the list of replacements. :param mixed_sbml: comp sbml model with multiple submodels :param tend: end time of the simulation :param step_size: step size for the integration, if None variable step size will be used :param debug: additional information :return: pandas solution data frame """ ############################### # Process FBA assignment rules ############################### # Necessary to find the assignment rules in the top model # These cannot be set in an hybrid approach. doc = libsbml.readSBMLFromFile(mixed_sbml) model = doc.getModel() def _process_mixed_assignments(model): """ Find the assignment rules which assign to a variable a reaction rate. """ fba_rules = {} for rule in model.getListOfRules(): if not rule.isAssignment(): continue variable = rule.getVariable() formula = rule.getFormula() parameter = model.getParameter(variable) if not parameter: continue reaction = model.getReaction(formula) if not reaction: continue fba_rules[reaction.getId()] = parameter.getId() return fba_rules fba_rules = _process_mixed_assignments(model) print('FBA rules:', fba_rules) # remove the FBA assignment rules from the model, so they can be set via the simulator for variable in fba_rules.values(): print(variable) model.removeRuleByVariable(variable) import tempfile mixed_sbml_cleaned = tempfile.NamedTemporaryFile("w", suffix=".xml") libsbml.writeSBMLToFile(doc, mixed_sbml_cleaned.name) # mixed_sbml_cleaned.close() ########################### # Load ODE model ########################### # the roadrunner ode file is the flattend comp file. # FBA subparts do not change change any of the kinetic subparts (only connections via replaced bounds # and fluxes). 
# Consequently the ode part can be solved as is, only the iterative update between ode and fba has # to be performed rr_comp = roadrunner.RoadRunner(mixed_sbml_cleaned.name) sel = ['time'] \ + ["".join(["[", item, "]"]) for item in rr_comp.model.getBoundarySpeciesIds()] \ + ["".join(["[", item, "]"]) for item in rr_comp.model.getFloatingSpeciesIds()] \ + rr_comp.model.getReactionIds() + fba_rules.values() rr_comp.timeCourseSelections = sel rr_comp.reset() ########################### # Load FBA models ########################### # via the submodels information it is decided which submodels are belonging to which # modeling framework (SBOTerms on submodel) doc = libsbml.readSBMLFromFile(mixed_sbml) model_frameworks = comp.get_submodel_frameworks(doc) model = doc.getModel() # get top level reaction ids # top_level_rids = frozenset([reaction.getId() for reaction in model.getListOfReactions()]) # assign submodels to FBA fba_models = {} for key, value in model_frameworks.iteritems(): if value["sbo"] == 624: print('FBA model') # get SBML file modelRef = value["modelRef"] mdoc = doc.getPlugin("comp") emd = mdoc.getExternalModelDefinition(modelRef) source = emd.getSource() print(source) fba_models[key] = {'cobra': cobra.io.read_sbml_model(source), 'doc': libsbml.readSBMLFromFile(source)} elif value['sbo'] == 62: print('ODE model') # get the sbml file modelRef = value["modelRef"] mdoc = doc.getPlugin("comp") emd = mdoc.getExternalModelDefinition(modelRef) source = emd.getSource() print(source) else: raise Exception("Modelling framework not supported, or not annotated on submodel: ", sbo) # submodels handled by FBA print(fba_models) ########################### # Simulation ########################### all_results = [] all_time = [] result = None time = 0.0 while time <= tend: if debug: print("-" * 80) print("Time: {}".format(time)) # -------------------------------------- # FBA # -------------------------------------- # all fba submodels are simulated for fba_key, fba_info in fba_models.iteritems(): cobra_model = fba_info['cobra'] sbml_model = fba_info['doc'].getModel() # TODO: calculate once (not required in loop) # which parameters are upper or lower bounds from collections import defaultdict ub_parameters = defaultdict(list) lb_parameters = defaultdict(list) for r in sbml_model.getListOfReactions(): mr = r.getPlugin("fbc") rid = r.getId() if mr.isSetUpperFluxBound(): ub_parameters[mr.getUpperFluxBound()].append(rid) if mr.isSetLowerFluxBound(): lb_parameters[mr.getLowerFluxBound()].append(rid) print(ub_parameters) print(lb_parameters) # search in global parameter replacements for replacements which replace the bounds # of reactions print("* set bounds *") for p in model.getListOfParameters(): pid = p.getId() mp = p.getPlugin("comp") for rep_element in mp.getListOfReplacedElements(): # the submodel of the replacement belongs to the current model if rep_element.getSubmodelRef() == fba_key: # update upper and lower bounds for rid in ub_parameters.get(pid, []): print(rid, ': (upper) -> ', pid) cobra_reaction = cobra_model.reactions.get_by_id(rid) cobra_reaction.upper_bound = rr_comp[pid] for rid in lb_parameters.get(pid, []): print(rid, ': (lower) -> ', pid) cobra_reaction = cobra_model.reactions.get_by_id(rid) cobra_reaction.lower_bound = rr_comp[pid] # [2] optimize print("* optimize *") # TODO: start with last solution (speed improvement) cobra_model.optimize() # [3] set fluxes in ODE part print("* set fba fluxes in ode *") # based on replacements the fluxes are written in the kinetic part # set 
solution fluxes in parameters for (rid, flux) in cobra_model.solution.x_dict.iteritems(): if rid in fba_rules: rr_comp[fba_rules[rid]] = flux print(rid, ':', fba_rules[rid], "=", flux) else: print(rid, "no boundary flux") if debug: print_flux_bounds(cobra_model) print(cobra_model.solution.status) print(cobra_model.solution.x_dict) print("-" * 80) # -------------------------------------- # ODE # -------------------------------------- # With the updated fluxes from the FBA the kinetic part is # calculated: # simulate (1 step) if step_size: # constant step size result = rr_comp.simulate(0, end=step_size, steps=1) else: # variable step size result = rr_comp.simulate(0, steps=1, variableStep=True) # store results all_results.append(result[1]) # set the fba fluxes in the model) # TODO: the fba fluxes are not set in the full kinetic result (shown as zero) # these have to be set with the mapping between comp and flattened model all_time.append(time) # store simulation values & get time step delta_time = result['time'][1] time = time + delta_time if debug: print(result) # create result matrix df = pd.DataFrame(data=all_results, columns=result.colnames) df.time = all_time print(df) return df