Example #1
    def __init__(self,
                 model_code='ecoli:iJO1366',
                 solver='gurobi',
                 min_biomass=0.55):
        self.logger = get_bistream_logger(
            (model_code + ':' + self.__class__.__name__).replace(':', '_'))
        self.logger.setLevel(logging.INFO)
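        # model_code is expected in the form '<species>:<model name>', e.g. 'ecoli:iJO1366'.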
        self.species = model_code.split(':')[0]
        self.model_name = model_code.split(':')[-1]
        if self.species.lower() == 'ecoli':
            if self.model_name.lower() == 'iml1515':
                self.model = load_json_model(
                    os.path.dirname(os.path.abspath(__file__)) +
                    '/data/ecoli/iML1515.json')
                self.biomass_reaction = 'BIOMASS_Ec_iML1515_core_75p37M'
            else:
                self.model = load_json_model(
                    os.path.dirname(os.path.abspath(__file__)) +
                    '/data/ecoli/iJO1366.json')
                self.biomass_reaction = 'BIOMASS_Ec_iJO1366_core_53p95M'
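            # Maximize biomass and require at least min_biomass flux through the biomass reaction.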
            self.model.objective = self.biomass_reaction
            self.model.reactions.get_by_id(
                self.biomass_reaction).lower_bound = min_biomass
            self.model.solver = solver
        self._init_model()
        self._print_summary()

        self.reserved_bounds = {}
        self.dummy_reactions = set()
Example #2
 def _load_model(self, file_storage):
     try:
         filename, content = self._decompress(file_storage.filename.lower(),
                                              file_storage)
     except IOError as err:
         msg = "Failed to decompress file."
         LOGGER.exception(msg)
         api.abort(400, msg, error=str(err))
     try:
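         # Dispatch on the declared MIME type first, then fall back to the file extension.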
         if file_storage.mimetype in self.JSON_TYPES or \
                 filename.endswith("json"):
             LOGGER.debug("Loading model from JSON.")
             model = load_json_model(content)
         elif file_storage.mimetype in self.XML_TYPES or \
                 filename.endswith("xml") or filename.endswith("sbml"):
             LOGGER.debug("Loading model from SBML.")
             model = read_sbml_model(content)
         else:
             msg = f"'{file_storage.mimetype}' is an unhandled MIME type."
             LOGGER.error(msg)
             api.abort(415, msg, recognizedMIMETypes=list(chain(
                 self.JSON_TYPES, self.XML_TYPES)))
     except (CobraSBMLError, ValueError) as err:
         msg = "Failed to parse model."
         LOGGER.exception(msg)
         api.abort(400, msg, error=str(err))
     finally:
         content.close()
         file_storage.close()
     return model
Example #3
def load_model_from_file(filename):
    """ Load a model from a file based on the extension of the file name.

    Parameters
    ----------
    filename : str
        Path to model file

    Returns
    -------
    cobra.core.Model
        Model object loaded from file

    Raises
    ------
    IOError
        If model file extension is not supported.
    """

    (root, ext) = splitext(filename)
    if ext == '.mat':
        model = load_matlab_model(filename)
    elif ext == '.xml' or ext == '.sbml':
        model = read_sbml_model(filename)
    elif ext == '.json':
        model = load_json_model(filename)
    else:
        raise IOError(
            'Model file extension not supported for {0}'.format(filename))
    return model
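A minimal usage sketch for the dispatcher above; the file names are placeholders:

# Hypothetical paths -- the extension decides which cobra loader is called.
model = load_model_from_file('e_coli_core.json')   # load_json_model
model = load_model_from_file('e_coli_core.xml')    # read_sbml_model
model = load_model_from_file('e_coli_core.mat')    # load_matlab_model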
Example #4
def load_universal_modelseed():
    seed_rxn_table = pd.read_csv(
        './medusa/test/data/reactions_seed_20180809.tsv', sep='\t')
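    # Append the '_c' compartment suffix so the table IDs match the reaction IDs in the universal model.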
    seed_rxn_table['id'] = seed_rxn_table['id'] + '_c'
    universal = load_json_model('./medusa/test/data/universal_mundy.json')
    # remove any reactions from the universal that don't have "OK" status
    # in modelSEED (guards against mass and charge-imbalanced reactions)
    ok_ids = list(seed_rxn_table.loc[(seed_rxn_table['status'] == 'OK') |
                                     (seed_rxn_table['status'] == 'HB')]['id'])
    remove_rxns = []
    for reaction in universal.reactions:
        if reaction.id not in ok_ids:
            remove_rxns.append(reaction)
    universal.remove_reactions(remove_rxns)
    # remove metabolites from the universal that are no longer present in any
    # reactions.
    mets_in_reactions = []
    for reaction in universal.reactions:
        mets = [met.id for met in reaction.metabolites]
        mets_in_reactions.extend(mets)
    mets_in_reactions = set(mets_in_reactions)

    mets_missing_reactions = []
    for metabolite in universal.metabolites:
        if metabolite.id not in mets_in_reactions:
            mets_missing_reactions.append(metabolite)
    universal.remove_metabolites(mets_missing_reactions)

    universal.repair()
    return universal
Example #5
def _create_visualisation(model_filename, svg_filename, output_filename, analysis_type='FBA',
                          analysis_results=None, intermediate_filename=None):
    # Check arguments
    supported_analysis_types = {"FBA", "FVA"}
    if analysis_type not in supported_analysis_types:
        message = "Analysis type is wrong. It has to be one of the values: {}"
        raise ValueError(message.format(supported_analysis_types))
    try:
        model = cio.load_json_model(model_filename)
    except Exception as exc:
        raise CobraModelFileError("Failed to load model from given JSON : {}".format(exc.args))
    # Set default arguments if none provided
    if analysis_results is None:
        fba_results = model.optimize()
        if analysis_type == 'FBA':
            fba_results.fluxes = fba_results.fluxes.round(5)
            analysis_results = fba_results
        elif analysis_type == 'FVA':
            fva_results = flux_variability_analysis(model, fraction_of_optimum=0.5)
            fva_results = fva_results.round(3)
            analysis_results = fva_results
    vizan_kwargs = {
        'model': model,
        'file_source_path': svg_filename,
        'analysis_results': analysis_results,
        'analysis_type': analysis_type,
        'output_filename': output_filename,
    }
    if intermediate_filename is None:
        with NamedTemporaryFile(mode="w") as intermediate_file:
            vizan_kwargs['intermediate_filename'] = intermediate_file.name
            produce_output_file(**vizan_kwargs)
    else:
        vizan_kwargs['intermediate_filename'] = intermediate_filename
        produce_output_file(**vizan_kwargs)
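A hedged usage sketch for the helper above; the JSON model and SVG template paths are placeholders:

# Run an FVA-based visualisation; the annotated SVG is written to the output file.
_create_visualisation('e_coli_core.json', 'pathway_template.svg', 'pathway_fva.svg',
                      analysis_type='FVA')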
Example #6
def _load_model_from_file(path, handle):
    """Try to parse a model from a file handle using different encodings."""
    logger.debug('Reading file from %s assuming pickled model.' % path)
    try:
        model = pickle.load(handle)
    except (TypeError, pickle.UnpicklingError):
        logger.debug('Cannot unpickle %s. Assuming json model next.' % path)
        try:
            model = load_json_model(path)
        except ValueError:
            logger.debug(
                "Cannot import %s as json model. Assuming sbml model next." %
                path)
            try:
                model = read_sbml_model(path)
            except AttributeError as e:
                logger.error(
                    "cobrapy doesn't raise a proper exception if a file does not contain an SBML model"
                )
                raise e
            except Exception as e:
                logger.error(
                    "Looks like something blow up while trying to import {} as a SBML model."
                    "Try validating the model at http://sbml.org/Facilities/Validator/ to get more information."
                    .format(path))
                raise e
    return model
Example #7
def create_cobra_model_from_bigg_model(bigg_id, validate=False):
    """ Create a COBRA model from a BiGG model.

    Parameters
    ----------
    bigg_id : str
        ID of BiGG model
    validate : bool, optional
        When True, perform validity checks on COBRA model

    Returns
    -------
    cobra.core.Model
        COBRA model created from JSON representation of BiGG model
    """

    # Download the JSON representation and details of the model from BiGG.
    LOGGER.info('Started download of %s model', bigg_id)
    response = requests.get('{0}models/{1}'.format(bigg_url, bigg_id))
    if response.status_code != requests.codes.OK:
        response.raise_for_status()
    details = response.json()

    response = requests.get('{0}models/{1}/download'.format(bigg_url, bigg_id))
    if response.status_code != requests.codes.OK:
        response.raise_for_status()
    LOGGER.info('Finished download of %s model', bigg_id)

    # Convert to a cobra.Model object.
    with io.StringIO(response.text) as f:
        model = load_json_model(f)

    # Add some details to the Model object.
    model.name = details['organism']
    model.notes['genome_name'] = details['genome_name']
    model.notes['reference_type'] = details['reference_type']
    model.notes['reference_id'] = details['reference_id']
    model.notes['source'] = 'BiGG'

    # Confirm a few basics.
    if len(model.reactions) != details['reaction_count']:
        warn('{0} reactions in model does not equal {1} in model details'.
             format(len(model.reactions), details['reaction_count']))
    if len(model.metabolites) != details['metabolite_count']:
        warn('{0} metabolites in model does not equal {1} in model details'.
             format(len(model.metabolites), details['metabolite_count']))
    if len(model.genes) != details['gene_count']:
        warn('{0} genes in model does not equal {1} in model details'.format(
            len(model.genes), details['gene_count']))

    # If requested, validate the COBRA model.
    if validate:
        warn('Coming soon')

    return model
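A short usage sketch; 'e_coli_core' is one of the published BiGG model IDs:

model = create_cobra_model_from_bigg_model('e_coli_core')
print(model.name, len(model.reactions), len(model.metabolites))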
Example #8
def get_model_from_bigg(id):
    try:
        response = requests.get('http://bigg.ucsd.edu/api/v2/models/{}/download'.format(id))
    except requests.ConnectionError as e:
        logger.error("Cannot reach http://bigg.ucsd.edu. Are you sure that you are connected to the internet?")
        raise e
    if response.ok:
        with io.StringIO(response.text) as f:
            return to_solver_based_model(load_json_model(f))
    else:
        raise Exception(
            "Could not download model {}. bigg.ucsd.edu returned status code {}".format(id, response.status_code))
Example #9
def test_reaction_bounds_json(data_directory, tmp_path):
    """Test reading and writing of model with inf bounds in json"""
    """Path to XML file with INF bounds"""
    path_to_xml_inf_file = join(data_directory, "fbc_ex1.xml")
    model_xml_inf = cio.read_sbml_model(path_to_xml_inf_file)
    path_to_output = join(str(tmp_path), "fbc_ex1_json.json")
    """Saving model with inf bounds in json form without error"""
    cio.save_json_model(model_xml_inf, path_to_output)
    """Path to JSON file with INF bounds"""
    path_to_JSON_inf_file = join(data_directory, "JSON_with_inf_bounds.json")
    model_json_inf = cio.load_json_model(path_to_JSON_inf_file)
    assert model_json_inf.reactions[0].upper_bound == float("inf")
Example #10
def test_load_json_model_valid(data_directory, tmp_path):
    """Test loading a valid annotation from JSON."""
    path_to_file = join(data_directory, "valid_annotation_format.json")
    model = load_json_model(path_to_file)
    expected = {
        "bigg.reaction": [["is", "PFK26"]],
        "kegg.reaction": [["is", "R02732"]],
        "rhea": [["is", "15656"]],
    }
    for metabolite in model.metabolites:
        assert metabolite.annotation == expected
    path_to_output = join(str(tmp_path), "valid_annotation_output.xml")
    write_sbml_model(model, path_to_output)
Example #11
 def get_form_data(test_analysis_type, with_analysis=False):
     data = {
         'model': open(test_model_filename, 'rb'),
         'svg': open(test_svg_filename, 'rb'),
         'analysis_type': test_analysis_type,
     }
     if with_analysis:
         return data
     else:
         model = load_json_model(test_model_filename)
         analysis_json = analysis_in_json(model, test_analysis_type)
         data['analysis_results'] = StringIO(analysis_json)
         return data
Example #12
def test_load_json_model_valid(data_directory, tmp_path):
    """Test loading a valid annotation from JSON."""
    path_to_file = join(data_directory, "valid_annotation_format.json")
    model = load_json_model(path_to_file)
    expected = {
        'bigg.reaction': [['is', 'PFK26']],
        'kegg.reaction': [['is', 'R02732']],
        'rhea': [['is', '15656']]
    }
    for metabolite in model.metabolites:
        assert metabolite.annotation == expected
    path_to_output = join(str(tmp_path), 'valid_annotation_output.xml')
    write_sbml_model(model, path_to_output)
Example #13
def perform_visualisation(model_filename, svg_filename, analysis_type='FBA',
                          analysis_results=None, output_filename=None, intermediate_filename=None):
    model = cio.load_json_model(model_filename)
    if analysis_results is None:
        model, analysis_results = perform_analysis(model, analysis_type)
    else:
        model.optimize()
    if output_filename is None:
        output_file = tempfile.NamedTemporaryFile(mode="w")
        output_filename = output_file.name
    if intermediate_filename is None:
        intermediate_file = tempfile.NamedTemporaryFile(mode="w")
        intermediate_filename = intermediate_file.name
    vizan.call_vizan_cli(model, svg_filename, analysis_results, analysis_type, output_filename, intermediate_filename)
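A hedged usage sketch; the model and SVG paths are placeholders:

# FBA is run on the JSON model and the annotated SVG goes to a temporary file
# because no output_filename is given.
perform_visualisation('e_coli_core.json', 'pathway_template.svg', analysis_type='FBA')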
Example #14
def load_cbm(model_name) -> Model:
    """
    Load a JSON cobra model stored in the static folder.

    Parameters
    ----------
    model_name : str
        The name of the model.

    Returns
    -------
    cobra.Model
        The loaded cobra model.
    """
    return load_json_model(static_path(model_name + ".json"))
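A short usage sketch; 'e_coli_core' is assumed to be a JSON model present in the static folder:

model = load_cbm('e_coli_core')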
Example #15
def get_model_from_bigg(id, solver_interface=optlang, sanitize=True):
    try:
        response = requests.get('http://bigg.ucsd.edu/api/v2/models/{}/download'.format(id))
    except requests.ConnectionError as e:
        logger.error("Cannot reach http://bigg.ucsd.edu. Are you sure that you are connected to the internet?")
        raise e
    if response.ok:
        with io.StringIO(response.text) as f:
            model = load_json_model(f)
            model.solver = solver_interface
            if sanitize:
                sanitize_ids(model)
            return model
    else:
        raise Exception(
            "Could not download model {}. bigg.ucsd.edu returned status code {}".format(id, response.status_code))
Example #16
 def _load_model(self, file_storage):
     try:
         filename, content = self._decompress(file_storage.filename.lower(),
                                              file_storage)
     except IOError as err:
         msg = f"Failed to decompress file: {str(err)}"
         LOGGER.exception(msg)
         abort(400, msg)
     try:
         if file_storage.mimetype in self.JSON_TYPES or filename.endswith(
                 "json"):
             LOGGER.debug("Loading model from JSON using cobrapy.")
             model = load_json_model(content)
         elif (file_storage.mimetype in self.XML_TYPES
               or filename.endswith("xml") or filename.endswith("sbml")):
             LOGGER.debug("Loading model from SBML using memote.")
             # Memote accepts only a file path, so write to a temporary file.
             with tempfile.NamedTemporaryFile() as file_:
                 file_.write(content.getvalue())
                 file_.seek(0)
                 model, sbml_ver, notifications = memote.validate_model(
                     file_.name, )
             if model is None:
                 LOGGER.info("SBML validation failure")
                 raise SBMLValidationError(
                     code=400,
                     warnings=notifications["warnings"],
                     errors=notifications["errors"],
                 )
         else:
             mime_types = ", ".join((chain(self.JSON_TYPES,
                                           self.XML_TYPES)))
             msg = (f"'{file_storage.mimetype}' is an unhandled MIME type. "
                    f"Recognized MIME types are: {mime_types}")
             LOGGER.warning(msg)
             abort(415, msg)
     except (CobraSBMLError, ValueError) as err:
         msg = f"Failed to parse model: {str(err)}"
         LOGGER.exception(msg)
         abort(400, msg)
     finally:
         content.close()
         file_storage.close()
     return model
Example #17
def get_model_from_bigg(id, solver_interface=optlang, sanitize=True):
    try:
        response = requests.get(
            'http://bigg.ucsd.edu/api/v2/models/{}/download'.format(id))
    except requests.ConnectionError as e:
        logger.error(
            "Cannot reach http://bigg.ucsd.edu. Are you sure that you are connected to the internet?"
        )
        raise e
    if response.ok:
        with io.StringIO(response.text) as f:
            model = load_json_model(f)
            model.solver = solver_interface
            if sanitize:
                sanitize_ids(model)
            return model
    else:
        raise Exception(
            "Could not download model {}. bigg.ucsd.edu returned status code {}"
            .format(id, response.status_code))
Example #18
def fva(json):
    model = load_json_model(json)
    bio = {
        reaction: reaction.objective_coefficient
        for reaction in model.reactions if search('biomass', reaction.name)
    }
    biom = list(bio)[0]  # dict views are not subscriptable in Python 3
    # set the objective
    model.change_objective(biom)
    # add constraints
    model.optimize()
    f0 = model.solution.f
    # get a dictionary with all non-zero fluxes
    fluxes = {
        reaction.id: reaction.x
        for reaction in model.reactions if reaction.x != 0
    }
    # first time store for the wild type
    v = variability.flux_variability_analysis(model)
    return v
Example #19
def analyze(name='ptfa_mlp', model='iML1515', threshold=0):
    coefs = pd.read_csv(f'output/{name}_coefs.csv', index_col=0)
    # coefs = (coefs - coefs.min()) / (coefs.max() - coefs.min()) * 100
    coefs = coefs[(coefs >= threshold).all(axis=1)]
    model = load_json_model(
        os.path.dirname(os.path.abspath(__file__)) +
        f'/../simulation/data/ecoli/{model}.json')

    reactions = []
    for react_id in coefs.index:
        score = coefs.loc[react_id, 'All_IC50']
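        # IDs ending in '_b' presumably denote the backward half of a split reversible reaction;
        # strip the suffix so the ID matches the model.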
        if react_id.endswith('_b'):
            react_id = react_id[:-2]
        if model.reactions.has_id(react_id):
            reaction = model.reactions.get_by_id(react_id)
            subsystem = reaction.subsystem
            subsystem = subsystem.replace(',', ':')
            kegg_react_id = ''
            biocyc_react_id = ''
            if 'kegg.reaction' in reaction.annotation:
                kegg_react_id = reaction.annotation['kegg.reaction'][0]
            if 'biocyc' in reaction.annotation:
                biocyc_react_id = reaction.annotation['biocyc'][0][5:]
            reactions.append(
                [react_id, subsystem, kegg_react_id, biocyc_react_id, score])

    results = pd.DataFrame(
        reactions,
        columns=['id', 'subsystem', 'kegg.reaction', 'biocyc', 'score'])
    results = results[~((results['kegg.reaction'] == '') |
                        (results['biocyc'] == ''))].set_index('id')
    results = results[~results.index.duplicated()]
    results.to_csv(f'output/reactions/{name}_scores.csv')
    results['kegg.reaction'].to_csv(f'output/reactions/{name}_kegg.txt',
                                    sep=' ',
                                    index=False,
                                    header=False)
    results['biocyc'].to_csv(f'output/reactions/{name}_biocyc.txt',
                             sep=' ',
                             index=False,
                             header=False)
Example #20
def model_loader(gem_file_path, gem_file_type):
    """Consolidated function to load a GEM using COBRApy. Specify the file type being loaded.

    Args:
        gem_file_path (str): Path to model file
        gem_file_type (str): GEM model type - ``sbml`` (or ``xml``), ``mat``, or ``json`` format

    Returns:
        COBRApy Model object.

    """

    if gem_file_type.lower() == 'xml' or gem_file_type.lower() == 'sbml':
        model = read_sbml_model(gem_file_path)
    elif gem_file_type.lower() == 'mat':
        model = load_matlab_model(gem_file_path)
    elif gem_file_type.lower() == 'json':
        model = load_json_model(gem_file_path)
    else:
        raise ValueError('File type must be "sbml", "xml", "mat", or "json".')

    return model
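A short usage sketch; the path is a placeholder:

model = model_loader('/path/to/iJO1366.json', 'json')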
Example #21
def _load_model_from_file(path, handle):
    """Try to parse a model from a file handle using different encodings."""
    logger.debug('Reading file from %s assuming pickled model.' % path)
    try:
        model = pickle.load(handle)
    except (TypeError, pickle.UnpicklingError):
        logger.debug('Cannot unpickle %s. Assuming json model next.' % path)
        try:
            model = load_json_model(path)
        except ValueError:
            logger.debug("Cannot import %s as json model. Assuming sbml model next." % path)
            try:
                model = read_sbml_model(path)
            except AttributeError as e:
                logger.error("cobrapy doesn't raise a proper exception if a file does not contain an SBML model")
                raise e
            except Exception as e:
                logger.error(
                    "Looks like something blow up while trying to import {} as a SBML model."
                    "Try validating the model at http://sbml.org/Facilities/Validator/ to get more information.".format(
                        path))
                raise e
    return model
Example #22
def pub_model(request):
    # get a specific model. This fixture and all the tests that use it will run
    # for every model in the model_files list.
    model_file = request.param
    model_path = join(settings.model_directory, model_file)

    # load the file
    start = time()
    try:
        if model_path.endswith('.xml'):
            # LibSBML does not like unicode filepaths in Python 2.7
            pub_model = read_sbml_model(str(model_path))
        elif model_path.endswith('.mat'):
            pub_model = load_matlab_model(model_path)
        elif model_path.endswith('.json'):
            pub_model = load_json_model(model_path)
        else:
            raise Exception('Unrecognized extension for model %s' % model_file)
    except IOError:
        raise Exception('Could not find model %s' % model_path)
    print("Loaded %s in %.2f sec" % (model_file, time() - start))

    return pub_model
Example #23
def _load_model_from_file(path, handle):
    """Try to parse a model from a file handle using different encodings.

    Adapted from cameo.
    """
    try:
        model = pickle.load(handle)
    except (TypeError, pickle.UnpicklingError):
        try:
            model = load_json_model(path)
        except ValueError:
            try:
                model = load_matlab_model(path)
            except ValueError:
                try:
                    model = read_sbml_model(path)
                except AttributeError:
                    click.ClickException(
                        "cobrapy doesn't raise a proper exception"
                        " if a file does not contain an SBML model"
                    ).show()
                except Exception as e:
                    click.ClickException(e).show()
    return model
Example #24
def test_load_json_model(data_directory, mini_model):
    """Test the reading of JSON model."""
    json_model = cio.load_json_model(join(data_directory, "mini.json"))
    assert compare_models(mini_model, json_model) is None
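The data_directory and mini_model fixtures come from the surrounding test suite; a minimal sketch of how they might be defined (directory layout and file names are assumptions):

import pytest
from os.path import dirname, join

import cobra.io as cio


@pytest.fixture(scope="module")
def data_directory():
    # Assumed layout: a 'data' directory next to the test module.
    return join(dirname(__file__), "data")


@pytest.fixture(scope="module")
def mini_model(data_directory):
    # Reference model read from SBML, compared against the JSON copy above.
    return cio.read_sbml_model(join(data_directory, "mini_fbc2.xml"))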
Example #26
def load_model(path_or_handle, solver_interface=optlang.glpk_interface, sanitize=True):
    """Read a metabolic model .

    Parameters
    ----------
    path_or_handle : path, file handle, or name.
        One of:
            * file path of a model file;
            * file handle to a SBML or pickled model; or
            * the identifier of a model in a web database (optflux.org/models)
    solver_interface : solver_interface, optional
        E.g. optlang.glpk_interface or any other optlang interface.
    sanitize : boolean, optional
        Whether reaction and metabolite IDs should be sanitized (works only for SBML models).
    """

    if isinstance(path_or_handle, str):
        path = path_or_handle
        try:
            handle = open(path_or_handle, 'rb')
        except IOError:
            logger.debug('%s not a file path. Querying webmodels ... trying http://bigg.ucsd.edu first' % path)
            try:
                return cameo.models.webmodels.get_model_from_bigg(path)
            except:
                logger.debug('%s not a file path. Querying webmodels ... trying minho next' % path)
                try:
                    df = cameo.models.webmodels.index_models_minho()
                except requests.ConnectionError as e:
                    logger.error("You need to be connected to the internet to load an online model.")
                    raise e
                except Exception as e:
                    logger.error("Something went wrong while looking up available webmodels.")
                    raise e
                try:
                    index = df.query('name == "%s"' % path_or_handle).id.values[0]
                    handle = cameo.models.webmodels.get_sbml_file(index)
                    path = handle.name
                except IndexError:
                    raise ValueError("%s is neither a file nor a model ID." % path)
    elif hasattr(path_or_handle, 'read'):
        path = path_or_handle.name
        handle = path_or_handle
    else:
        raise ValueError('Provided argument %s has to be either a file path or handle' % path_or_handle)
    logger.debug('Reading file from %s assuming pickled model.' % path)
    try:
        model = pickle.load(handle)
    except Exception:
        logger.debug('Cannot unpickle %s. Assuming json model next.' % path)
        try:
            model = load_json_model(path)
        except Exception:
            logger.debug("Cannot import %s as json model. Assuming sbml model next." % path)
            try:
                model = read_sbml_model(path)
            except AttributeError as e:
                logger.error("cobrapy doesn't raise a proper exception if a file does not contain an SBML model")
                raise e
            except Exception as e:
                logger.error(
                    "Looks like something blow up while trying to import {} as a SBML model. Try validating the model at http://sbml.org/Facilities/Validator/ to get more information.".format(
                        path))
                raise e
    if sanitize:
        sanitize_ids(model)

    if not isinstance(model, SolverBasedModel):
        if solver_interface is not None:
            logger.debug("Changing solver interface to %s" % solver_interface)
            model = to_solver_based_model(model, solver_interface=solver_interface)
    else:
        if model.interface is not solver_interface and solver_interface is not None:
            logger.debug("Changing solver interface to %s" % solver_interface)
            model.solver = solver_interface

    return model
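A hedged usage sketch; 'iJO1366' is a BiGG model ID and the local path is a placeholder:

# From a local file path
model = load_model('/path/to/iJO1366.json')
# Or directly from a web database by identifier
model = load_model('iJO1366')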
Example #27
'''
Created on 5 Oct 2017

@author: mate

This script reads in the iJR904 E. coli model from JSON format
and adds the BDO synthesis pathway to it.
'''
# from cobra import Reaction, Metabolite

# This is a very detailed E. coli model with about 1000 enzymes:

from cobra.io import load_json_model
bigg = load_json_model("/home/mate/code/met_model/src/models/iJR904.json")

newReaction = bigg.reactions.get_by_id("BIOMASS_Ecoli")

newThing = bigg.reactions.get_by_id("EX_glc__D_e")

print(newThing.reaction)

print(newThing.metabolites)

bigg.objective = newReaction
bigg.optimize()
print(bigg.objective.value)
print(newThing.flux)

print(len(bigg.reactions))
print(len(bigg.metabolites))
print(len(bigg.genes))
Example #28
GLPK = 'optlang-glpk'
solver = GUROBI

case = 'full'  # 'reduced' or 'full'

# Load reaction DB
print("Loading thermo data...")
thermo_data = load_thermoDB(thermo_database)

print("Done !")
#biomass_rxn = 'BIOMASS_Ec_iJO1366_WT_53p95M'
biomass_rxn = 'Ec_biomass_iJO1366_WT_53p95M'

# We import pre-compiled data as it is faster for bigger models
model_path = '/projectnb2/bioinfor/SEGRE/goldford/CoenzymeSpecificity/pytfa/models'
cobra_model = load_json_model(model_path +
                              '/iJO1366_WT_semi-unconstrained.11Oct2021.json')
lexicon = read_lexicon(model_path + '/iJO1366/lexicon.csv')
compartment_data = read_compartment_data(model_path +
                                         '/iJO1366/compartment_data.json')

# Initialize the cobra_model
mytfa = pytfa.ThermoModel(thermo_data, cobra_model)

# Annotate the cobra_model
annotate_from_lexicon(mytfa, lexicon)
apply_compartment_data(mytfa, compartment_data)

mytfa.name = 'iJO1366[WT]'
mytfa.solver = solver
mytfa.objective = biomass_rxn
Example #29
def test_load_json_model_invalid(data_directory):
    """Test that loading an invalid annotation from JSON raises TypeError"""
    path = join(data_directory, "invalid_annotation_format.json")
    with pytest.raises(TypeError):
        load_json_model(path)
Example #30
import cobra.test
from cobra import Model, Reaction, Metabolite
from cobra.io import read_sbml_model, load_json_model
from cobra.flux_analysis import moma, add_moma
import json, sys

try:
    assert (len(sys.argv) == 2)
    method = sys.argv[1].lower()
    assert (method in ['fba', 'moma'])
except:
    print(('Usage: %s <fba, moma>') % sys.argv[0])
    exit()

model = load_json_model('data/iML1428-iso_Glucose.json')

# Set constraints for aerobic growth in glucose minimal medium
model.reactions.get_by_id('EX_glc__D_e').lower_bound = -10
model.reactions.get_by_id('EX_o2_e').lower_bound = -15

# Load experiments and reaction data

experiments = [
    'Isozyme overexpression',
    ('glyA', 'ltaE'),
    ('ilvA', 'tdcB'),
    ('ilvE', 'avtA'),
    ('metC', 'malY'),
    'Substrate ambiguity',
    ('glnA', 'asnB'),
    ('pdxB', 'tdh'),
Example #31
GLPK = 'optlang-glpk'
solver = GUROBI

case = 'full'  # 'reduced' or 'full'

# Load reaction DB
print("Loading thermo data...")
thermo_data = load_thermoDB(thermo_database)

print("Done !")
#biomass_rxn = 'BIOMASS_Ec_iJO1366_WT_53p95M'
biomass_rxn = 'Ec_biomass_iJO1366_WT_53p95M'

# We import pre-compiled data as it is faster for bigger models
model_path = '/projectnb2/bioinfor/SEGRE/goldford/CoenzymeSpecificity/pytfa/models'
cobra_model = load_json_model(model_path + '/iJO1366_NAD_ratio_1.fromTFA.json')
lexicon = read_lexicon(model_path + '/iJO1366/lexicon.csv')
compartment_data = read_compartment_data(model_path +
                                         '/iJO1366/compartment_data.json')

# Initialize the cobra_model
mytfa = pytfa.ThermoModel(thermo_data, cobra_model)

# Annotate the cobra_model
annotate_from_lexicon(mytfa, lexicon)
apply_compartment_data(mytfa, compartment_data)

mytfa.name = 'iJO1366[NAD]'
mytfa.solver = solver
mytfa.objective = biomass_rxn
Example #32
def test_load_json(db_model):
    model = load_json_model(join(static_model_dir, db_model.id + '.json'))
    assert model.id == db_model.id
Example #33
def load_modelseed_model(model_name):
    if model_name == 'Staphylococcus aureus':
        model = load_json_model('./medusa/test/data/' + model_name + '.json')
    else:
        raise ValueError('Unsupported model_name provided')
    return model
Example #34
    UB_DICT = {'c': 1e99, 'p': 1e99, 'e': 1e99}  # in M

sys.path.append(os.path.expanduser('~/git/equilibrator-api/'))
from equilibrator_api import ComponentContribution, Reaction
equilibrator = ComponentContribution(pH=CYTOPLASMIC_PH,
                                     ionic_strength=CYTOPLASMIC_IONIC_STRENGTH)

USE_CORE = False

if USE_CORE:
    from cobra.io import read_sbml_model
    cobra_model = read_sbml_model(settings.CORE_SBML_FNAME)
    BM_RXN = 'BIOMASS_Ecoli_core_w_GAM'
else:
    from cobra.io import load_json_model
    cobra_model = load_json_model(settings.IJO1366_JSON_FNAME)
    BM_RXN = 'BIOMASS_Ec_iJO1366_core_53p95M'

###############################################################################


def get_metabolite_df():
    bigg2kegg = []

    # manually add nh4_c and nh4_e (they appear as nh3 in the text
    # file BIGG_METABOLITE_FNAME, but in some models nh4 is used)
    bigg2kegg += [('nh4_c', 'nh4', 'C00014')]
    bigg2kegg += [('nh4_p', 'nh4', 'C00014')]
    bigg2kegg += [('nh4_e', 'nh4', 'C00014')]

    # manually add q8_c q8h2_c (ubiquinone and ubiquinol)