Example #1
def test_simres_observable():
    """ Test on demand observable evaluation """
    models = [tyson_oscillator.model, robertson.model,
              expression_observables.model, earm_1_3.model,
              bax_pore_sequential.model, bax_pore.model,
              bngwiki_egfr_simple.model]
    for model in models:
        generate_equations(model)
        spm = SpeciesPatternMatcher(model)
        for obs in model.observables:
            dyn_obs = spm.match(pattern=obs.reaction_pattern, index=True,
                                counts=True)

            # Need to sort by species numerical order for comparison purposes
            dyn_obs = collections.OrderedDict(sorted(dyn_obs.items()))

            dyn_species = list(dyn_obs.keys())

            if obs.match == 'species':
                dyn_coeffs = [1] * len(dyn_obs)
            else:
                dyn_coeffs = list(dyn_obs.values())

            assert dyn_species == obs.species
            assert dyn_coeffs == obs.coefficients
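
A minimal sketch of the workflow the test above exercises: generate_equations() populates model.species, model.odes and model.reactions (shown here with the bundled robertson example model; any PySB model works):

from pysb.bng import generate_equations
from pysb.examples import robertson

model = robertson.model
generate_equations(model)  # runs BNG network generation for this model

# After generation, the reaction network is available on the model object
print(len(model.species), 'species')
print(len(model.reactions), 'reactions')
for i, ode in enumerate(model.odes):
    print('d[__s%d]/dt = %s' % (i, ode))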
Example #2
    def __init__(self,
                 model,
                 inputs=None,
                 stop_time=1000,
                 outside_name_map=None):
        self.model = model
        generate_equations(model)

        self.inputs = inputs if inputs else []
        self.stop_time = stop_time
        self.outside_name_map = outside_name_map if outside_name_map else {}

        self.dt = numpy.array(10.0)
        self.units = 'seconds'
        self.sim = None
        self.attributes = copy.copy(default_attributes)
        self.species_name_map = {}
        for idx, species in enumerate(self.model.species):
            monomer = species.monomer_patterns[0].monomer
            self.species_name_map[monomer.name] = idx
        self.input_vars = self._get_input_vars()
        # These attributes are related to the simulation state
        self.state = numpy.array([100.0 for s in self.species_name_map.keys()])
        self.time = numpy.array(0.0)
        self.status = 'start'
        self.time_course = [(self.time, self.state)]
        # EMELI needs a DONE attribute
        self.DONE = False
Example #3
    def __init__(self, solver, values_to_sample, objective_function,
                 observable):
        self._model = solver.model
        self._logger = get_logger(__name__, model=self._model)
        self._logger.info('%s created for observable %s' %
                          (self.__class__.__name__, observable))
        generate_equations(self._model)
        self._ic_params_of_interest_cache = None
        self._values_to_sample = values_to_sample
        if solver is None or not isinstance(solver,
                                            pysb.simulator.base.Simulator):
            raise TypeError("solver must be a pysb.simulator object")
        self._solver = solver
        self.objective_function = objective_function
        self.observable = observable

        # Outputs
        self.b_matrix = []
        self.b_prime_matrix = []
        self.p_prime_matrix = np.zeros(self._size_of_matrix)
        self.p_matrix = np.zeros(self._size_of_matrix)

        self._original_initial_conditions = np.zeros(len(self._model.species))
        self._index_of_species_of_interest = self._create_index_of_species()
        self.simulation_initials = self._setup_simulations()
        # Stores the objective function value for the original unperturbed
        # model
        self._objective_fn_standard = None
Example #4
def parameter_sweep(model, sigma, ns):
    generate_equations(model)
    logp = [numpy.log10(p.value) for p in model.parameters]
    ts = numpy.linspace(0, 20*3600, 20*60)
    solver = Solver(model, ts)
    pf.set_fig_params()
    plt.figure(figsize=(1.8, 1), dpi=300)
    for i in range(ns):
        psample = sample_params(logp, 0.05)
        res = solver.run(param_values=psample)
        signal = res.observables['p53_active']
        plt.plot(signal, color=(0.7, 0.7, 0.7), alpha=0.3)
    # Highlighted
    colors = ['g', 'y', 'c']
    for c in colors:
        psample = sample_params(logp, 0.05)
        res = solver.run(param_values=psample)
        signal = res.observables['p53_active']
        plt.plot(signal, c)

    # Nominal
    solver = Solver(model, ts)
    res = solver.run()
    signal = res.observables['p53_active']
    plt.plot(signal, 'r')

    plt.xticks([])
    plt.xlabel('Time (a.u.)', fontsize=7)
    plt.ylabel('Active p53', fontsize=7)
    plt.yticks([])
    plt.ylim(ymin=0)
    pf.format_axis(plt.gca())
    plt.savefig(model.name + '_sample.pdf')
Example #6
def parameter_sweep(model, sigma, ns):
    generate_equations(model)
    logp = [numpy.log10(p.value) for p in model.parameters]
    ts = numpy.linspace(0, 20 * 3600, 20 * 60)
    solver = Solver(model, ts)
    pf.set_fig_params()
    plt.figure(figsize=(1.8, 1), dpi=300)
    for i in range(ns):
        psample = sample_params(logp, 0.05)
        res = solver.run(param_values=psample)
        signal = res.observables['p53_active']
        plt.plot(signal, color=(0.7, 0.7, 0.7), alpha=0.3)
    # Highlighted
    colors = ['g', 'y', 'c']
    for c in colors:
        psample = sample_params(logp, 0.05)
        res = solver.run(param_values=psample)
        signal = res.observables['p53_active']
        plt.plot(signal, c)

    # Nominal
    solver = Solver(model, ts)
    res = solver.run()
    signal = res.observables['p53_active']
    plt.plot(signal, 'r')

    plt.xticks([])
    plt.xlabel('Time (a.u.)', fontsize=7)
    plt.ylabel('Active p53', fontsize=7)
    plt.yticks([])
    plt.ylim(ymin=0)
    pf.format_axis(plt.gca())
    plt.savefig(model.name + '_sample.pdf')
Example #7
    def setUp(self):
        Monomer('A', ['a'])
        Monomer('B', ['b'])

        Parameter('ksynthA', 100)
        Parameter('ksynthB', 100)
        Parameter('kbindAB', 100)

        Parameter('A_init', 0)
        Parameter('B_init', 0)

        Initial(A(a=None), A_init)
        Initial(B(b=None), B_init)

        Observable("A_free", A(a=None))
        Observable("B_free", B(b=None))
        Observable("AB_complex", A(a=1) % B(b=1))

        Rule('A_synth', None >> A(a=None), ksynthA)
        Rule('B_synth', None >> B(b=None), ksynthB)
        Rule('AB_bind', A(a=None) + B(b=None) >> A(a=1) % B(b=1), kbindAB)

        self.model = model
        generate_equations(self.model)

        # Convenience shortcut for accessing model monomer objects
        self.mon = lambda m: self.model.monomers[m]

        # This timespan is chosen to be enough to trigger a Jacobian evaluation
        # on the various solvers.
        self.time = np.linspace(0, 1)
        self.sim = BngSimulator(self.model, tspan=self.time)
Example #8
    def __init__(self, model, inputs=None, stop_time=1000,
                 outside_name_map=None):
        self.model = model
        generate_equations(model)

        self.inputs = inputs if inputs else []
        self.stop_time = stop_time
        self.outside_name_map = outside_name_map if outside_name_map else {}

        self.dt = numpy.array(10.0)
        self.units = 'seconds'
        self.sim = None
        self.attributes = copy.copy(default_attributes)
        self.species_name_map = {}
        for idx, species in enumerate(self.model.species):
            monomer = species.monomer_patterns[0].monomer
            self.species_name_map[monomer.name] = idx
        self.input_vars = self._get_input_vars()
        # These attributes are related to the simulation state
        self.state = numpy.array([100.0 for s in self.species_name_map.keys()])
        self.time = numpy.array(0.0)
        self.status = 'start'
        self.time_course = [(self.time, self.state)]
        # EMELI needs a DONE attribute
        self.DONE = False
Example #9
def find_nonimportant_nodes(model):
    """
    This function looks at the bidirectional reactions and finds the nodes
    that have at most one incoming and one outgoing reaction (edge).

    Parameters
    ----------
    model : pysb.Model
        PySB model to use

    Returns
    -------
    a list of non-important nodes
    """
    if not model.odes:
        generate_equations(model)

    # gets the reactant and product species in the reactions
    rcts_sp = sum([i['reactants'] for i in model.reactions_bidirectional], ())
    pdts_sp = sum([i['products'] for i in model.reactions_bidirectional], ())
    # find the reactants and products that are only used once
    non_imp_rcts = set([x for x in range(len(model.species)) if rcts_sp.count(x) < 2])
    non_imp_pdts = set([x for x in range(len(model.species)) if pdts_sp.count(x) < 2])
    non_imp_nodes = set.intersection(non_imp_pdts, non_imp_rcts)
    passengers = non_imp_nodes
    return passengers
Example #10
def stoichiometry_matrix(model):
    generate_equations(model)
    sm = np.zeros((len(model.species), len(model.reactions)))
    for i_s, sp in enumerate(model.species):
        for i_r, r in enumerate(model.reactions):
            sm[i_s][i_r] = r['products'].count(i_s) - r['reactants'].count(i_s)
    return sm
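
A short usage sketch for the helper above, run against the bundled robertson example model (an assumption; any generated PySB model works):

from pysb.examples import robertson

sm = stoichiometry_matrix(robertson.model)
# rows are species, columns are reactions; entries are net stoichiometric coefficients
print(sm.shape)
print(sm)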
Example #12
    def __init__(self, solver, values_to_sample, objective_function,
                 observable):
        self._model = solver.model
        self._logger = get_logger(__name__, model=self._model)
        self._logger.info('%s created for observable %s' % (
            self.__class__.__name__, observable))
        generate_equations(self._model)
        self._ic_params_of_interest_cache = None
        self._values_to_sample = values_to_sample
        if solver is None or not isinstance(solver,
                                            pysb.simulator.base.Simulator):
            raise TypeError("solver must be a pysb.simulator object")
        self._solver = solver
        self.objective_function = objective_function
        self.observable = observable

        # Outputs
        self.b_matrix = []
        self.b_prime_matrix = []
        self.p_prime_matrix = np.zeros(self._size_of_matrix)
        self.p_matrix = np.zeros(self._size_of_matrix)

        self._original_initial_conditions = np.zeros(len(self._model.species))
        self._index_of_species_of_interest = self._create_index_of_species()
        self.simulation_initials = self._setup_simulations()
        # Stores the objective function value for the original unperturbed
        # model
        self._objective_fn_standard = None
Example #14
def test_non_python_name_phos():
    st = Phosphorylation(Agent('14-3-3'), Agent('BRAF kinase'))
    pa = PysbAssembler([st])
    pa.make_model()
    names = [m.name for m in pa.model.monomers]
    assert 'BRAF_kinase' in names
    assert 'p14_3_3' in names
    bng.generate_equations(pa.model)
Example #15
def stoichimetry_matrix_passengers(model, pruned_system):
    generate_equations(model)
    sm = np.zeros((len(pruned_system.keys()), len(model.reactions)))
    for i_s, pa in enumerate(pruned_system.keys()):
        for i_r, r in enumerate(model.reactions):
            if r['rate'] in pruned_system[pa].as_coefficients_dict().keys():
                sm[i_s][i_r] = r['products'].count(pa) - r['reactants'].count(pa)
    return sm
Example #16
def visualization_path(model,
                       path,
                       target_node,
                       type_analysis,
                       filename,
                       rankdir='TB'):
    """
    Visualize dominant path

    Parameters
    ----------
    model: pysb.Model
        pysb model used for analysis
    path: list
        list that have the tree structure of the path, generated by the discretization step.
    target_node: str
        Species target used to obtain the dominant paths
    type_analysis: str
        Type of analysis done to obtain the path. It can either be `production` or `consumption`
    filename: str
        File name including the extension of the image file
    rankdir: str
        Graphviz rank direction used to lay out the graph (e.g. 'TB' or 'LR')

    Returns
    -------
    None
    """

    generate_equations(model)

    def find_numbers(dom_r_str):
        n = map(int, re.findall(r'\d+', dom_r_str))
        return n

    def nodenamefunc(node):
        node_idx = list(find_numbers(node.name))[0]
        node_sp = model.species[node_idx]
        node_name = parse_name(node_sp)
        return node_name

    def edgeattrfunc(node, child):
        return 'dir="back"'

    root = _create_tree(target_node, path)

    if type_analysis == 'production':
        DotExporter(root,
                    graph='strict digraph',
                    options=["rankdir={};".format(rankdir)],
                    nodenamefunc=nodenamefunc,
                    edgeattrfunc=edgeattrfunc).to_picture(filename)
    elif type_analysis == 'consumption':
        DotExporter(root,
                    graph='strict digraph',
                    options=["rankdir={};".format(rankdir)],
                    nodenamefunc=nodenamefunc,
                    edgeattrfunc=None).to_picture(filename)
    else:
        raise ValueError('Type of visualization not implemented')
Example #17
 def __init__(self, model, simulations):
     super().__init__(model)
     self._trajectories, self._parameters, self._nsims, self._tspan = hf.get_simulations(
         simulations)
     self._par_name_idx = {
         j.name: i
         for i, j in enumerate(self.model.parameters)
     }
     generate_equations(self.model)
Example #18
def path_differences(model, paths_labels, type_analysis='production'):
    """

    Parameters
    ----------
    model: PySB model
        Model used to do dominant path analysis
    paths_labels: dict
        Dictionary of pathways generated by dominant path analysis
    type_analysis: str
        Type of analysis used in the dominant path analysis.
        It can either be `production` or `consumption`

    Returns
    -------
    A pandas dataframe where the column names and row indices are the labels of the
    pathways and the cells contain the edges that are present in the
    row pathway index but not in the column pathway.
    """
    generate_equations(model)
    importer = DictImporter()
    path_edges = {}

    def find_numbers(dom_r_str):
        n = map(int, re.findall(r'\d+', dom_r_str))
        return n

    def nodenamefunc(node):
        node_idx = list(find_numbers(node.name))[0]
        node_sp = model.species[node_idx]
        node_name = parse_name(node_sp)
        return node_name

    def edgeattrfunc(node, child):
        return 'dir="back"'

    for keys, values in paths_labels.items():
        root = importer.import_(values)
        dot = DotExporter(root, graph='strict digraph', options=["rankdir=RL;"], nodenamefunc=nodenamefunc,
                          edgeattrfunc=edgeattrfunc)
        data = ''
        for line in dot:
            data += line
        pydot_graph = graph_from_dot_data(data)
        graph = from_pydot(pydot_graph[0])
        if type_analysis == 'production':
            graph = graph.reverse()
        edges = set(graph.edges())
        path_edges[keys] = edges

    path_diff = pd.DataFrame(index=paths_labels.keys(), columns=paths_labels.keys())
    for row in path_diff.columns:
        for col in path_diff.columns:
            path_diff.loc[row, col] = path_edges[row].difference(path_edges[col])
    return path_diff
Example #19
    def __init__(self,
                 solver,
                 values_to_sample,
                 objective_function,
                 observable,
                 sens_type='initials',
                 sample_list=None):
        if not isinstance(solver, pysb.simulator.base.Simulator):
            raise TypeError("solver must be a pysb.simulator object")
        self._model = solver.model
        self._logger = get_logger(__name__, model=self._model)
        self._logger.info('%s created for observable %s' %
                          (self.__class__.__name__, observable))
        generate_equations(self._model)
        self._values_to_sample = values_to_sample
        self._solver = solver
        self.objective_function = objective_function
        self._observable = observable
        self._sens_type = sens_type
        if self._sens_type not in ('params', 'initials', 'all'):
            if sample_list is None:
                raise ValueError("Please provide 'sens_type' or 'sample_list'")
        if sample_list is not None:
            _valid_options = [i.name for i in self._model.parameters]
            for i in sample_list:
                if i not in _valid_options:
                    raise ValueError("{} not in model.parameters".format(i))
            self.index = sample_list
        elif self._sens_type == 'params':
            self.index = [i.name for i in self._model.parameters_rules()]
        elif self._sens_type == 'initials':
            self.index = [i[1].name for i in self._model.initial_conditions]
        elif self._sens_type == 'all':
            self.index = [i.name for i in self._model.parameters]

        self.orig_vals = [i.value for i in self._model.parameters]
        self.index_of_param = {
            i.name: n
            for n, i in enumerate(self._model.parameters)
        }
        self._n_sam = len(self._values_to_sample)
        self._n_species = len(self.index)
        self._nm = self._n_species * self._n_sam
        self._size_of_matrix = self._nm**2
        self._shape_of_matrix = self._nm, self._nm

        # Outputs
        self.b_matrix = []
        self.b_prime_matrix = []
        self.params_to_run = self._setup_simulations()
        self.p_prime_matrix = np.zeros(self._size_of_matrix)
        self.p_matrix = np.zeros(self._size_of_matrix)
        # Stores the objective function value for the original unperturbed
        # model
        self._objective_fn_standard = None
Example #20
def _get_observable(obs, y):
    from pysb.bng import generate_equations
    generate_equations(model)
    obs_names = [ob.name for ob in model.observables]
    try:
        obs_idx = obs_names.index(obs)
    except ValueError:
        raise ValueError(obs + " doesn't exist in the model")
    sps = model.observables[obs_idx].species
    obs_values = np.sum(y[:, :, sps], axis=2)
    return obs_values.T
Example #21
def run_one_model(model, data, prior_vals, ns, pool=None):
    # Vector of estimated parameters
    pe = [p for p in model.parameters if p.name.startswith('k')]

    # Generate model equations
    generate_equations(model)
    Solver._use_inline = True
    sol = Solver(model, numpy.linspace(0, 10, 10))
    sol.run()

    # Number of temperatures, dimensions and walkers
    ntemps = 20
    ndim = len(pe)
    blocksize = 48
    nwalkers = get_num_walkers(ndim, blocksize)
    print('Running %d walkers at %d temperatures for %d steps.' %
          (nwalkers, ntemps, ns))

    sampler = emcee.PTSampler(ntemps,
                              nwalkers,
                              ndim,
                              likelihood,
                              prior,
                              threads=1,
                              pool=pool,
                              betas=None,
                              a=2.0,
                              Tmax=None,
                              loglargs=[model, data],
                              logpargs=[model, prior_vals],
                              loglkwargs={},
                              logpkwargs={})

    # Random initial parameters for walkers
    p0 = numpy.ones((ntemps, nwalkers, ndim))
    for i in range(ntemps):
        for j in range(nwalkers):
            for k, pp in enumerate(pe):
                p0[i, j, k] = prior_vals.vals[j][pp.name]
    print(p0)
    # Run sampler
    fname = scratch_path + 'chain_%s.dat' % model.name
    step = 0
    for result in sampler.sample(p0, iterations=ns, storechain=True):
        print('---')
        position = result[0]
        with open(fname, 'a') as fh:
            for w in range(nwalkers):
                for t in range(ntemps):
                    pos_str = '\t'.join(['%f' % p for p in position[t][w]])
                    fh.write('%d\t%d\t%d\t%s\n' % (step, w, t, pos_str))
        step += 1
    return sampler
Example #22
def create_model_files(model, model_name, directory=None):
    curr_dir = os.getcwd()
    if not model.odes:
        generate_equations(model)

    if directory is not None:
        os.chdir(directory)

    file_basename = model_name + '_model'
    with open(file_basename + '.p', 'wb') as fh:
        dill.dump(model, fh)

    os.chdir(curr_dir)
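
A possible counterpart for loading the pickled model back, assuming the same file_basename convention as above (load_model_file is a hypothetical helper, not part of the original module):

import os
import dill

def load_model_file(model_name, directory=None):
    """Load a model previously saved by create_model_files (hypothetical helper)."""
    path = model_name + '_model.p'
    if directory is not None:
        path = os.path.join(directory, path)
    with open(path, 'rb') as fh:
        return dill.load(fh)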
Example #23
def test_synthesis_monomer_pattern():
    subj = Agent('KRAS')
    obj = Agent('BRAF')
    st1 = Activation(subj, obj)
    st2 = Synthesis(subj, obj)
    pa = PysbAssembler(policies='one_step')
    pa.add_statements([st1, st2])
    model = pa.make_model()
    assert(len(model.rules)==2)
    assert(len(model.monomers)==2)
    # This ensures that the synthesized BRAF monomer
    # is in its fully specified "base" state
    bng.generate_equations(model)
Example #24
def build_all_models():
    mek_seq_rand = ['seq', 'random']
    mkp_seq_rand = ['seq', 'random']
    erk_dimerization = [False, 'any', 'uT', 'phos']
    mkp_activation = [False, True]

    model_combinations = itertools.product(*(mek_seq_rand, mkp_seq_rand,
                                             erk_dimerization, mkp_activation))

    models = {}
    for mc in model_combinations:
        model = build_model(*mc)
        generate_equations(model)
        models[model.name] = model
    return models
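
A small usage sketch, assuming build_model() from the same module is importable; it simply reports the network size of every generated variant:

models = build_all_models()
for name, m in sorted(models.items()):
    print('%s: %d species, %d reactions' % (name, len(m.species), len(m.reactions)))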
Example #26
    def __init__(self, solver, values_to_sample, objective_function,
                 observable, sens_type='initials', sample_list=None):
        if not isinstance(solver, pysb.simulator.base.Simulator):
            raise TypeError("solver must be a pysb.simulator object")
        self._model = solver.model
        self._logger = get_logger(__name__, model=self._model)
        self._logger.info('%s created for observable %s' % (
            self.__class__.__name__, observable))
        generate_equations(self._model)
        self._values_to_sample = values_to_sample
        self._solver = solver
        self.objective_function = objective_function
        self._observable = observable
        self._sens_type = sens_type
        if self._sens_type not in ('params', 'initials', 'all'):
            if sample_list is None:
                raise ValueError("Please provide 'sens_type' or 'sample_list'")
        if sample_list is not None:
            _valid_options = [i.name for i in self._model.parameters]
            for i in sample_list:
                if i not in _valid_options:
                    raise ValueError("{} not in model.parameters".format(i))
            self.index = sample_list
        elif self._sens_type == 'params':
            self.index = [i.name for i in self._model.parameters_rules()]
        elif self._sens_type == 'initials':
            self.index = [i[1].name for i in self._model.initial_conditions]
        elif self._sens_type == 'all':
            self.index = [i.name for i in self._model.parameters]

        self.orig_vals = [i.value for i in self._model.parameters]
        self.index_of_param = {i.name: n for n, i in
                               enumerate(self._model.parameters)}
        self._n_sam = len(self._values_to_sample)
        self._n_species = len(self.index)
        self._nm = self._n_species * self._n_sam
        self._size_of_matrix = self._nm ** 2
        self._shape_of_matrix = self._nm, self._nm

        # Outputs
        self.b_matrix = []
        self.b_prime_matrix = []
        self.params_to_run = self._setup_simulations()
        self.p_prime_matrix = np.zeros(self._size_of_matrix)
        self.p_matrix = np.zeros(self._size_of_matrix)
        # Stores the objective function value for the original unperturbed
        # model
        self._objective_fn_standard = None
Example #27
def print_model_stats(model):
    """ provides stats for the models

    :param model:
    """
    generate_equations(model)
    print("Information about model {0}".format(model.name))
    print("Number of rules {0}".format(len(model.rules)))
    print("Number of parameters {0}".format(len(model.parameters)))
    print(
    "Number of parameter rules {0}".format(len(model.parameters_rules())))
    print("Number of reactions {0}".format(len(model.reactions)))
    print(
    "Number of initial conditions {0}".format(len(model.initial_conditions)))
    print("Number of species {0}".format(len(model.species)))
    print('{}'.format('-' * 24))
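
For example (a sketch using the bundled robertson example model):

from pysb.examples import robertson

print_model_stats(robertson.model)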
Example #28
def convert_odes(model, p_name_map, s_name_map_by_pattern):
    """Substitutes species and parameter names using the given name mappings.

    Parameters
    ----------
    model : pysb.core.Model
        The model to be converted.
    p_name_map : dict
        A dict where the keys are the parameter names of the PySB model and the
        values are the parameter names of the original model, e.g.
        {'bind_BidT_Bcl2_kf': 'ka_tBid_Bcl2'}
    s_name_map_by_pattern : dict
        A dict where the keys are the string representations of the species
        from the PySB model, generated by calling str(species), and the values
        are the species names in the original model, e.g.
        {'Bid(bf=None, state=T)': 'Act'}

    Returns
    -------
    dict
        A dict mapping each original-model species name to the right-hand side
        of its ODE, rendered as a string.
    """

    generate_equations(model)

    # Get the index of each species
    s_name_map_by_num = {}
    for i, s in enumerate(model.species):
        for key in s_name_map_by_pattern:
            if key == str(s):
                s_name_map_by_num['s%d' % i] = s_name_map_by_pattern[key]

    name_map = {}
    name_map.update(p_name_map)
    name_map.update(s_name_map_by_num)

    # Substitute new names into the ODEs
    ode_list = {}
    for i, ode in enumerate(model.odes):
        new_ode = ode.subs(name_map)
        ode_species = s_name_map_by_pattern[str(model.species[i])]
        ode_list[ode_species] = str(new_ode)

    return ode_list
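
A hedged usage sketch; the model and the two name maps below are placeholders standing in for a real PySB translation of an existing ODE model:

# hypothetical mappings, for illustration only
p_name_map = {'bind_BidT_Bcl2_kf': 'ka_tBid_Bcl2'}
s_name_map_by_pattern = {'Bid(bf=None, state=T)': 'Act'}

odes = convert_odes(model, p_name_map, s_name_map_by_pattern)  # model: a pysb.Model
for species_name, rhs in odes.items():
    print('d[%s]/dt = %s' % (species_name, rhs))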
Example #29
 def __init__(self, model, tspan=None, initials=None,
              param_values=None, verbose=False, **kwargs):
     super(StochKitSimulator, self).__init__(model,
                                             tspan=tspan,
                                             initials=initials,
                                             param_values=param_values,
                                             verbose=verbose,
                                             **kwargs)
     self.cleanup = kwargs.pop('cleanup', True)
     if kwargs:
         raise ValueError('Unknown keyword argument(s): {}'.format(
             ', '.join(kwargs.keys())
         ))
     self._outdir = None
     generate_equations(self._model,
                        cleanup=self.cleanup,
                        verbose=self.verbose)
Example #30
def convert_odes(model, p_name_map, s_name_map_by_pattern):
    """Substitutes species and parameter names using the given name mappings.

    Parameters
    ----------
    model : pysb.core.Model
        The model to be converted.
    p_name_map : dict
        A dict where the keys are the parameter names of the PySB model and the
        values are the parameter names of the original model, e.g.
        {'bind_BidT_Bcl2_kf': 'ka_tBid_Bcl2'}
    s_name_map_by_pattern : dict
        A dict where the keys are the string representations of the species
        from the PySB model, generated by calling str(species), and the values
        are the species names in the original model, e.g.
        {'Bid(bf=None, state=T)': 'Act'}

    Returns
    -------
    dict
        A dict mapping each original-model species name to the right-hand side
        of its ODE, rendered as a string.
    """

    generate_equations(model)

    # Get the index of each species
    s_name_map_by_num = {}
    for i, s in enumerate(model.species):
        for key in s_name_map_by_pattern:
            if key == str(s):
                s_name_map_by_num['s%d' % i] = s_name_map_by_pattern[key]

    name_map = {}
    name_map.update(p_name_map)
    name_map.update(s_name_map_by_num)

    # Substitute new names into the ODEs
    ode_list = {}
    for i, ode in enumerate(model.odes):
        new_ode = ode.subs(name_map)
        ode_species = s_name_map_by_pattern[str(model.species[i])]
        ode_list[ode_species] = str(new_ode)

    return ode_list
Example #31
 def __init__(self,
              model,
              tspan=None,
              initials=None,
              param_values=None,
              verbose=False,
              **kwargs):
     super(StochKitSimulator, self).__init__(model,
                                             tspan=tspan,
                                             initials=initials,
                                             param_values=param_values,
                                             verbose=verbose,
                                             **kwargs)
     self.cleanup = kwargs.get('cleanup', True)
     self._outdir = None
     generate_equations(self._model,
                        cleanup=self.cleanup,
                        verbose=self.verbose)
Example #32
def run_one_model(model, data, prior_vals, ns, pool=None):
    # Vector of estimated parameters
    pe = [p for p in model.parameters if p.name.startswith('k')]

    # Generate model equations
    generate_equations(model)
    Solver._use_inline = True
    sol = Solver(model, numpy.linspace(0,10,10))
    sol.run()

    # Number of temperatures, dimensions and walkers
    ntemps = 20
    ndim = len(pe)
    blocksize = 48
    nwalkers = get_num_walkers(ndim, blocksize)
    print('Running %d walkers at %d temperatures for %d steps.' %
          (nwalkers, ntemps, ns))

    sampler = emcee.PTSampler(ntemps, nwalkers, ndim, likelihood, prior,
         threads=1, pool=pool, betas=None, a=2.0, Tmax=None,
         loglargs=[model, data], logpargs=[model, prior_vals],
         loglkwargs={}, logpkwargs={})

    # Random initial parameters for walkers
    p0 = numpy.ones((ntemps, nwalkers, ndim))
    for i in range(ntemps):
        for j in range(nwalkers):
            for k, pp in enumerate(pe):
                p0[i, j, k] = prior_vals.vals[j][pp.name]
    print(p0)
    # Run sampler
    fname = scratch_path + 'chain_%s.dat' % model.name
    step = 0
    for result in sampler.sample(p0, iterations=ns, storechain=True):
        print('---')
        position = result[0]
        with open(fname, 'a') as fh:
            for w in range(nwalkers):
                for t in range(ntemps):
                    pos_str = '\t'.join(['%f' % p for p in position[t][w]])
                    fh.write('%d\t%d\t%d\t%s\n' % (step, w, t, pos_str))
        step += 1
    return sampler
Example #33
def compare_odes(model, p_name_map, s_index_map, m_ode_list):
    """Compare the PySB model ODEs to the original MATLAB ODEs."""
    s_name_map = [('s%d' % i, 'x(%d)' % j) for i, j in enumerate(s_index_map)]
    name_map = {}
    name_map.update(p_name_map)
    name_map.update(s_name_map)
    generate_equations(model)
    ode_list = []
    result_list = []
    for i, ode in enumerate(model.odes):
        new_ode = ode.subs(name_map)
        old_ode = m_ode_list[s_index_map[i] - 1]
        result = new_ode == old_ode
        result_list.append(result)
        if not result:
            print "Mismatch for species " + str(i) + ": "
            print "Pysb ODE   : %s" % str(new_ode)
            print "Matlab ODE : %s" % str(old_ode)

    return result_list
Example #34
def check_all_species_generated(model):
    """Check the lists of rules and triggering species match BNG"""
    generate_equations(model)
    spm = SpeciesPatternMatcher(model)

    # Get the rules and species firing them using the pattern matcher
    species_produced = dict()
    for rule, rp_species in spm.rule_firing_species().items():
        species_produced[rule.name] = set()
        for sp_list in rp_species:
            for sp in sp_list:
                species_produced[rule.name].add(model.get_species_index(sp))

    # Get the equivalent dictionary of {rule name: [reactant species]} from BNG
    species_bng = collections.defaultdict(set)
    for rxn in model.reactions:
        for rule in rxn['rule']:
            species_bng[rule].update(rxn['reactants'])

    # Our output should match BNG
    assert species_produced == species_bng
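
The check above can be driven with any bundled example model, for instance (a sketch):

from pysb.examples import bax_pore

check_all_species_generated(bax_pore.model)  # raises AssertionError on a mismatch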
Example #35
def test_species_pattern_matcher():
    # See also SpeciesPatternMatcher doctests

    # Check that SpeciesPatternMatcher raises exception if model has no species
    model = robertson.model
    model.reset_equations()
    assert_raises(Exception, SpeciesPatternMatcher, model)

    model = bax_pore.model
    generate_equations(model)
    spm = SpeciesPatternMatcher(model)
    BAX = model.monomers['BAX']
    sp_sets = spm.species_fired_by_reactant_pattern(
        as_reaction_pattern(BAX(t1=None, t2=None)))
    assert len(sp_sets) == 1
    assert len(sp_sets[0]) == 2

    sp_sets = spm.species_fired_by_reactant_pattern(
        as_reaction_pattern(BAX(t1=WILD, t2=ANY)))
    assert len(sp_sets) == 1
    assert len(sp_sets[0]) == 10
Example #36
    def encode_model(cls, model):
        d = super(PySBJSONWithNetworkEncoder, cls).encode_model(model)

        # Ensure network generation has taken place
        generate_equations(model)

        additional_encoders = {
            '_derived_parameters': cls.encode_parameter,
            '_derived_expressions': cls.encode_expression,
            'reactions': cls.encode_reaction,
            'reactions_bidirectional': cls.encode_reaction,
            'species': cls.encode_complex_pattern
        }

        for component_type, encoder in additional_encoders.items():
            d[component_type] = [
                encoder(component)
                for component in getattr(model, component_type)
            ]

        return d
Example #37
def rate_2_interactions(model, rate):
    """
    Obtains the interacting protein from a rection rate
    Parameters
    ----------
    model : PySB model
    rate : str
    Returns
    -------

    """

    generate_equations(model)
    species_idxs = re.findall(r'(?<=__s)\d+', rate)
    species_idxs = [int(i) for i in species_idxs]
    if len(species_idxs) == 1:
        interaction = parse_name(model.species[species_idxs[0]])
    else:
        sp_monomers = {sp: model.species[sp].monomer_patterns for sp in species_idxs}
        sorted_intn = sorted(sp_monomers.items(), key=lambda value: len(value[1]))
        interaction = ", ".join(parse_name(model.species[mons[0]]) for mons in sorted_intn[:2])
    return interaction
Example #38
def check_all_species_generated(model):
    """Check the lists of rules and triggering species match BNG"""
    generate_equations(model)
    spm = SpeciesPatternMatcher(model)

    # Get the rules and species firing them using the pattern matcher
    species_produced = dict()
    for rule, rp_species in spm.rule_firing_species().items():
        species_produced[rule.name] = set()
        for sp_list in rp_species:
            for sp in sp_list:
                species_produced[rule.name].add(
                    model.get_species_index(sp))

    # Get the equivalent dictionary of {rule name: [reactant species]} from BNG
    species_bng = collections.defaultdict(set)
    for rxn in model.reactions:
        for rule in rxn['rule']:
            species_bng[rule].update(rxn['reactants'])

    # Our output should match BNG
    assert species_produced == species_bng
Example #39
def test_species_pattern_matcher():
    # See also SpeciesPatternMatcher doctests

    # Check that SpeciesPatternMatcher raises exception if model has no species
    model = robertson.model
    model.reset_equations()
    assert_raises(Exception, SpeciesPatternMatcher, model)

    model = bax_pore.model
    generate_equations(model)
    spm = SpeciesPatternMatcher(model)
    BAX = model.monomers['BAX']
    sp_sets = spm.species_fired_by_reactant_pattern(
        as_reaction_pattern(BAX(t1=None, t2=None))
    )
    assert len(sp_sets) == 1
    assert len(sp_sets[0]) == 2

    sp_sets = spm.species_fired_by_reactant_pattern(
        as_reaction_pattern(BAX(t1=WILD, t2=ANY))
    )
    assert len(sp_sets) == 1
    assert len(sp_sets[0]) == 10
Example #40
import chen_2009_original_sbml
from pysb.integrate import Solver
from pysb.bng import generate_equations
import numpy as np
import matplotlib.pyplot as plt
import sympy

# Replicate matlab simulation using PySB simulation code.

model = chen_2009_original_sbml.pysb_model()

generate_equations(model)
for i in (0, 388, 334):
    model.odes[i] = sympy.numbers.Zero()

tspan = np.linspace(0, 9000, 9001)
solver = Solver(model, tspan, atol=1e-6, rtol=1e-8)
solver.run()

plt.figure()
for i, (arr, obs, color) in enumerate(
    [(solver.yexpr, "pErbB1", "b"), (solver.yobs, "pERK", "g"), (solver.yobs, "pAKT", "r")]
):
    plt.subplot(3, 1, i + 1)
    plt.plot(tspan, arr[obs], c=color, label=obs)
plt.show()
Example #41
 def __init__(self, model, tspan=None, cleanup=True, verbose=False):
     super(StochKitSimulator, self).__init__(model, tspan, verbose)
     generate_equations(self.model, cleanup, self.verbose)
Example #42
 def time_egfr_equations_max_iter_8(self):
     generate_equations(model, max_iter=8)
     # Check model ODEs are generated
     [x for x in model.odes]
Example #43
def test_generate_equations():
    st = Phosphorylation(Agent('MAP2K1'), Agent('MAPK3'))
    pa = PysbAssembler()
    pa.add_statements([st])
    pa.make_model()
    bng.generate_equations(pa.model)
Example #44
    # Sort names and reactions by names.
    descriptions, reactions = zip(*sorted(zip(descriptions, reactions)))

    return descriptions, reactions


argparser = argparse.ArgumentParser()
argparser.add_argument('-m',
                       '--print-matches',
                       action='store_true',
                       help="Print matching elements too (mismatches are "
                       "always printed)")
args = argparser.parse_args(sys.argv[1:])

pysb_model = rasmodel.chen_2009.model
generate_equations(pysb_model)

rasmodel.chen_2009.original_sbml.load_model()
sbml_model = rasmodel.chen_2009.original_sbml.model

pysb_species_names, pysb_species = get_pysb_species()
sbml_species_names, sbml_species = get_sbml_species()

if len(pysb_species_names) != len(set(pysb_species_names)):
    raise RuntimeError("Duplicate pysb species names")
if len(sbml_species_names) != len(set(sbml_species_names)):
    raise RuntimeError("Duplicate sbml species names")

# Count matches.
species_matches = reaction_matches = 0
Example #45
def test_non_python_name_bind():
    st = Complex([Agent('14-3-3'), Agent('BRAF kinase')])
    pa = PysbAssembler()
    pa.add_statements([st])
    pa.make_model()
    bng.generate_equations(pa.model)
Example #46
def run_cupsoda(tspan, model, simulations, vol, name, mem):
    tpb = 32

    solver = CupSodaSimulator(model, tspan, atol=ATOL, rtol=RTOL, gpu=0,
                              max_steps=mxstep, verbose=False, vol=vol,
                              obs_species_only=True,
                              memory_usage=mem_dict[mem])

    cols = ['model', 'nsims', 'tpb', 'mem', 'cupsodatime', 'cupsoda_io_time',
            'pythontime', 'rtol', 'atol', 'mxsteps', 't_end', 'n_steps', 'vol',
            'card']
    generate_equations(model)
    nominal_values = np.array([p.value for p in model.parameters])
    all_output = []
    for num_particles in simulations:
        # set the number of blocks so that there are 32 threads per block
        n_blocks = int(np.ceil(1. * num_particles / tpb))
        solver.n_blocks = n_blocks

        # create a matrix of initial conditions
        c_matrix = np.zeros((num_particles, len(nominal_values)))
        c_matrix[:, :] = nominal_values
        y0 = np.zeros((num_particles, len(model.species)))
        for ic in model.initial_conditions:
            for j in range(len(model.species)):
                if str(ic[0]) == str(model.species[j]):
                    y0[:, j] = ic[1].value
                    break

        # setup a unique log output file to extract timing from
        log_file = 'logfile_{}.log'.format(num_particles)
        # remove it if it already exists (happens if rerunning simulation)
        if os.path.exists(log_file):
            os.remove(log_file)

        setup_logger(logging.INFO, file_output=log_file, console_output=True)
        start_time = time.time()

        # run the simulations
        x = solver.run(param_values=c_matrix, initials=y0)

        end_time = time.time()

        # create an emtpy list to put all data for this run
        out_list = list()
        out_list.append(name)
        out_list.append(num_particles)
        out_list.append(str(tpb))
        out_list.append(mem)
        cupsoda_time = 'error'
        total_time = 'error'
        with open(log_file, 'r') as f:
            for line in f:
                if 'reported time' in line:
                    good_line = line.split(':')[-1].split()[0]
                    cupsoda_time = float(good_line)
                if 'I/O time' in line:
                    good_line = line.split(':')[-1].split()[0]
                    total_time = float(good_line)
            f.close()

        out_list.append(cupsoda_time)
        out_list.append(total_time)
        out_list.append(end_time - start_time)
        out_list.append(RTOL)
        out_list.append(ATOL)
        # append in the same order as the cols list above
        out_list.append(mxstep)
        out_list.append(np.max(tspan))
        out_list.append(len(tspan))
        out_list.append(vol)
        out_list.append(card)
        all_output.append(out_list)
        print(" ".join(str(i) for i in out_list))
        print('out==')
        print(x.observables[0][0])
        print(x.observables[0][-1])
        print('==out\n')

    df = pd.DataFrame(all_output, columns=cols)
    print(df)
    df.to_csv('{}_cupsoda_timings_{}.csv'.format(name, mem), index=False)
    return df
Example #47
Rule('IKKa_and_IkBa', IKKa() + IkBac() >> IKKa() + IkBa_p(), a2)
Rule('A20t_to_A20', A20t() >> A20t() + A20(), c4)
Rule('A20_on_to_A20t', A20_on() >> A20_on() + A20t(), c1)
Rule('IkBat_to_IkBan', IkBat() >> IkBat() + IkBac(), c4)
Rule('IkBac_to_IkBan', IkBac() >> IkBan(), i1a)
Rule('IkBan_to_IkBac', IkBan() >> IkBac(), e1a)
Rule('IkBa_on_to_IkBat', IkBa_on() >> IkBa_on() + IkBat(), c1a)
Rule('IkBan_NFkBn_to_IkBac_NFkBc', IkBan_NFkBn() >> IkBac_NFkBc(), e2a)
Rule('IkBan_and_A20_on', IkBan() + A20_on() >> IkBan() + A20_off(), q2)
Rule('IkBan_and_IkBa_on', IkBan() + IkBa_on() >> IkBan() + IkBa_off(), q2)
Rule('IKKii_to_IKKn', IKKii() >> IKKn(), k4)
Rule('NFkBn_and_A20_off', NFkBn() + A20_off() >> NFkBn() + A20_on(), q1)
Rule('NFkBn_and_IkBa_off', NFkBn() + IkBa_off() >> NFkBn() + IkBa_on(), q1)

if __name__ == "__main__":
    generate_equations(model, verbose=True)
    # odes_unstruct = [(i,":",odes)for i,odes in enumerate(model.odes)]

    #
    # for i,sp in enumerate(model.species):
    #     print i,":",sp
    # # print
    # # for i,rxn in enumerate(model.reactions):
    # #     print i,":",rxn
    # # print
    # myodes = []
    # for i,ode in enumerate(model.odes):
    #     myodes.append(i, odes)
    # #     print myodes # i,":",ode

    # Simulate the model
Example #48
    # Add tBid initial condition to mito only model
    Bid = m_mito.all_components()["Bid"]
    try:
        p = Parameter("tBid_0", 1, _export=False)
        m_mito.initial(Bid(state="T", bf=None), p)
        m_mito.add_component(p)
    except Exception:
        pass  # Duplicate initial condition for tBid

    if model in ["chen_2007_febs_direct", "howells"]:
        Bad = m_mito.all_components()["Bad"]
        p = Parameter("mBad_0", 1, _export=False)
        m_mito.initial(Bad(state="M", bf=None, serine="U"), p)
        m_mito.add_component(p)

    generate_equations(m_mito)
    generate_equations(m_full)
    print(
        "%-24s %11d %10d %12d %11d %10d %12d"
        % (
            model,
            len(m_mito.rules),
            len(m_mito.odes),
            len(m_mito.parameters),
            len(m_full.rules),
            len(m_full.odes),
            len(m_full.parameters),
        )
    )

# Full models
Example #49
        descriptions.append(desc)

    # Sort names and reactions by names.
    descriptions, reactions = zip(*sorted(zip(descriptions, reactions)))

    return descriptions, reactions


argparser = argparse.ArgumentParser()
argparser.add_argument('-m', '--print-matches', action='store_true',
                       help="Print matching elements too (mismatches are "
                       "always printed)")
args = argparser.parse_args(sys.argv[1:])

pysb_model = rasmodel.chen_2009.model
generate_equations(pysb_model)

rasmodel.chen_2009.original_sbml.load_model()
sbml_model = rasmodel.chen_2009.original_sbml.model

pysb_species_names, pysb_species = get_pysb_species()
sbml_species_names, sbml_species = get_sbml_species()

if len(pysb_species_names) != len(set(pysb_species_names)):
    raise RuntimeError("Duplicate pysb species names")
if len(sbml_species_names) != len(set(sbml_species_names)):
    raise RuntimeError("Duplicate sbml species names")

# Count matches.
species_matches = reaction_matches = 0
Example #50
def generate_equations(model, pkl_cache):
    bng.generate_equations(model, verbose=True)
    with open(pkl_cache, 'wb') as fh:  # pickle requires a binary-mode handle
        pickle.dump(model, fh)
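
A matching load helper, assuming the cache file was written by the function above (load_cached_model is a hypothetical name):

import pickle

def load_cached_model(pkl_cache):
    """Load a model whose generated network was cached to disk."""
    with open(pkl_cache, 'rb') as fh:
        return pickle.load(fh)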
Example #51
# alpha = alpha1 + alpha2 + alpha3 + alpha4 + alpha5 + alpha6 + alpha7 + alpha8
#
#
#
# alpha1 = 1
# alpha2 = 4*Cr*Ka**2*Kr2*NnpD2*RnpD2
# alpha3 = 2*Kr*RnpD*rR
# alpha4 = 2*Cr*Kr2*RnpD2*rR2
# alpha5 = 2*Ka*Kr*NnpD*RnpD*(1+2*Cr*Kr*RnpD*rR)
# alpha6 = 3*HdnpD*Kn*rNbox*(1+2*Cnr*Kr*RnpD*(Ka*NnpD+2*Cr*Ka2*Kr*NnpD2*RnpD+rR+2*Cr*Ka*Kr*NnpD*RnpD*rR+Cr*Kr*RnpD*rR2))
# alpha7 = 6*Cn*HdnpD2*Kn2*rNbox2*(1+2*Cnr*Kr*RnpD*(Ka*NnpD+2*Cr*Ka2*Kr*NnpD2*RnpD+rR+2*Cr*Ka*Kr*NnpD*RnpD*rR+Cr*Kr*RnpD*rR2))
# alpha8 = 6*Cn2*HdnpD3*Kn3*rNbox3*(1+2*Cnr*Kr*RnpD*(Ka*NnpD+2*Cr*Ka2*Kr*NnpD2*RnpD+rR+2*Cr*Ka*Kr*NnpD*RnpD*rR+Cr*Kr*RnpD*rR2)))

#fixv, fix value

bng.generate_equations(model)

x = 1

print(model.species[x])
print(list(model.odes)[x])

verbose = False
if verbose:
    print(model.odes)
    print('BREAK')
    print(len(model.rules))
    print(len(model.reactions))
    print(model.rules)
    print('BREAK2')
    print(model.reactions)
Example #52
 def __init__(self, model, tspan=None, mesh=None, initial_dist=None, cleanup=True, verbose=False):
     super(PyurdmeSimulator, self).__init__(model, tspan, verbose)
     generate_equations(self.model, cleanup, self.verbose)
Example #53
    def export(self, initials=None, param_values=None):
        """Generate the corresponding StochKit2 XML for a PySB model

        Parameters
        ----------
        initials : list of numbers
            List of initial species concentrations overrides
            (must be same length as model.species). If None,
            the concentrations from the model are used.
        param_values : list
            List of parameter value overrides (must be same length as
            model.parameters). If None, the parameter values from the model
            are used.

        Returns
        -------
        string
            The model in StochKit2 XML format
        """
        if self.model.compartments:
            raise CompartmentsNotSupported()

        generate_equations(self.model)
        document = etree.Element("Model")

        d = etree.Element('Description')

        d.text = 'Exported from PySB Model: %s' % self.model.name
        document.append(d)

        # Number of Reactions
        nr = etree.Element('NumberOfReactions')
        nr.text = str(len(self.model.reactions))
        document.append(nr)

        # Number of Species
        ns = etree.Element('NumberOfSpecies')
        ns.text = str(len(self.model.species))
        document.append(ns)

        if param_values is None:
            # Get parameter values from model if not supplied
            param_values = [p.value for p in self.model.parameters]
        else:
            # Validate length
            if len(param_values) != len(self.model.parameters):
                raise Exception('param_values must be a list of numeric '
                                'parameter values the same length as '
                                'model.parameters')

        # Get initial species concentrations from model if not supplied
        if initials is None:
            initials = np.zeros((len(self.model.species),))
            subs = dict((p, param_values[i]) for i, p in
                        enumerate(self.model.parameters))

            for ic in self.model.initials:
                cp = as_complex_pattern(ic.pattern)
                si = self.model.get_species_index(cp)
                if si is None:
                    raise IndexError("Species not found in model: %s" %
                                     repr(cp))
                if ic.value in self.model.parameters:
                    pi = self.model.parameters.index(ic.value)
                    value = param_values[pi]
                elif ic.value in self.model.expressions:
                    value = ic.value.expand_expr().evalf(subs=subs)
                else:
                    raise ValueError(
                        "Unexpected initial condition value type")
                initials[si] = value
        else:
            # Validate length
            if len(initials) != len(self.model.species):
                raise Exception('initials must be a list of numeric initial '
                                'concentrations the same length as '
                                'model.species')

        # Species
        spec = etree.Element('SpeciesList')
        for s_id in range(len(self.model.species)):
            spec.append(self._species_to_element('__s%d' % s_id,
                                                 initials[s_id]))
        document.append(spec)

        # Parameters
        params = etree.Element('ParametersList')
        for p_id, param in enumerate(self.model.parameters):
            p_name = param.name
            if p_name == 'vol':
                p_name = '__vol'
            p_value = param.value if param_values is None else \
                param_values[p_id]
            params.append(self._parameter_to_element(p_name, p_value))
        # Default volume parameter value
        params.append(self._parameter_to_element('vol', 1.0))

        document.append(params)

        # Expressions and observables
        expr_strings = {
            e.name: '(%s)' % sympy.ccode(
                e.expand_expr(expand_observables=True)
            )
            for e in self.model.expressions
        }

        # Reactions
        reacs = etree.Element('ReactionsList')
        pattern = re.compile(r"(__s\d+)\*\*(\d+)")
        for rxn_id, rxn in enumerate(self.model.reactions):
            rxn_name = 'Rxn%d' % rxn_id
            rxn_desc = 'Rules: %s' % str(rxn["rule"])

            reactants = defaultdict(int)
            products = defaultdict(int)
            # reactants
            for r in rxn["reactants"]:
                reactants["__s%d" % r] += 1
            # products
            for p in rxn["products"]:
                products["__s%d" % p] += 1

            total_reactants = sum(reactants.values())
            rxn_params = rxn["rate"].atoms(Parameter)
            rate = None
            if total_reactants <= 2 and len(rxn_params) == 1:
                # Try to parse as mass action to avoid compiling custom
                # propensity functions in StochKit (slow for big models)
                rxn_param = rxn_params.pop()
                putative_rate = sympy.Mul(*[sympy.symbols(r) ** r_stoich for
                                            r, r_stoich in
                                            reactants.items()]) * rxn_param

                rxn_floats = rxn["rate"].atoms(sympy.Float)
                rate_mul = 1.0
                if len(rxn_floats) == 1:
                    rate_mul = next(iter(rxn_floats))
                    putative_rate *= rate_mul

                if putative_rate == rxn["rate"]:
                    # Reaction is mass-action, set rate to a Parameter or float
                    if len(rxn_floats) == 0:
                        rate = rxn_param
                    elif len(rxn_floats) == 1:
                        rate = rxn_param.value * float(rate_mul)

                    if rate is not None and len(reactants) == 1 and \
                            max(reactants.values()) == 2:
                        # Need rate * 2 in addition to any rate factor
                        rate = (rate.value if isinstance(rate, Parameter)
                                else rate) * 2.0

            if rate is None:
                # Custom propensity function needed

                rxn_atoms = rxn["rate"].atoms()

                # replace terms like __s**2 with __s*(__s-1)
                rate = str(rxn["rate"])

                matches = pattern.findall(rate)
                for m in matches:
                    repl = m[0]
                    for i in range(1, int(m[1])):
                        repl += "*(%s-%d)" % (m[0], i)
                    rate = re.sub(pattern, repl, rate, count=1)

                # expand only expressions used in the rate eqn
                for e in {sym for sym in rxn_atoms
                          if isinstance(sym, Expression)}:
                    rate = re.sub(r'\b%s\b' % e.name,
                                  expr_strings[e.name],
                                  rate)

            reacs.append(self._reaction_to_element(rxn_name,
                                                   rxn_desc,
                                                   rate,
                                                   reactants,
                                                   products))
        document.append(reacs)

        if pretty_print:
            return etree.tostring(document, pretty_print=True).decode('utf8')
        else:
            # Hack to print pretty xml without pretty-print
            # (requires the lxml module).
            doc = etree.tostring(document)
            xmldoc = xml.dom.minidom.parseString(doc)
            uglyXml = xmldoc.toprettyxml(indent='  ')
            text_re = re.compile(r">\n\s+([^<>\s].*?)\n\s+</", re.DOTALL)
            prettyXml = text_re.sub(r">\g<1></", uglyXml)
            return prettyXml
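
If this export() method belongs to PySB's StochKit exporter, the usual entry point is pysb.export.export; a sketch, assuming model is an already-defined pysb.Model:

from pysb.export import export

stochkit_xml = export(model, 'stochkit')
with open('model_stochkit.xml', 'w') as fh:
    fh.write(stochkit_xml)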
Example #55
Initial(BclXL(bf=None), Parameter('BclXL_0', 1))
Initial(BclW(bf=None), Parameter('BclW_0', 1))
Initial(Mcl1(bf=None), Parameter('Mcl1_0', 1))
Initial(Bfl1(bf=None), Parameter('Bfl1_0', 1))

# This is the code shown for "Example Macro Call" (not printed here)
bind_table([[Bcl2, BclXL, BclW, Mcl1, Bfl1], [Bid, 66, 12, 10, 10, 53],
            [Bim, 10, 10, 38, 10, 73], [Bad, 11, 10, 60, None, None],
            [Bik, 151, 10, 17, 109, None], [Noxa, None, None, None, 19, None],
            [Hrk, None, 92, None, None, None], [Puma, 18, 10, 25, 10, 59],
            [Bmf, 24, 10, 11, 23, None]],
           'bf',
           'bf',
           kf=1e3)

generate_equations(model)

num_rules = len(model.rules)
num_odes = len(model.odes)

print "BNGL Rules"
print "=========="
print num_rules, "rules"
print
print "ODEs"
print "===="
print num_odes, "ODEs"


def test_fig2c():
    assert num_rules == 28, "number of rules not as expected"
Example #57
def export_sbgn(model):
    """Return an SBGN model string corresponding to the PySB model.

    This function first calls generate_equations on the PySB model to obtain
    a reaction network (i.e. individual species, reactions). It then iterates
    over each reaction and instantiates its reactants, products, and the
    process itself as SBGN glyphs and arcs.

    Parameters
    ----------
    model : pysb.core.Model
        A PySB model to be exported into SBGN

    Returns
    -------
    sbgn_str : str
        An SBGN model as string
    """
    import lxml.etree
    import lxml.builder
    from pysb.bng import generate_equations
    from indra.assemblers.sbgn import SBGNAssembler

    logger.info('Generating reaction network with BNG for SBGN export. ' +
                'This could take a long time.')
    generate_equations(model)

    sa = SBGNAssembler()

    glyphs = {}
    for idx, species in enumerate(model.species):
        glyph = sa._glyph_for_complex_pattern(species)
        if glyph is None:
            continue
        sa._map.append(glyph)
        glyphs[idx] = glyph
    for reaction in model.reactions:
        # Get all the reactions / products / controllers of the reaction
        reactants = set(reaction['reactants']) - set(reaction['products'])
        products = set(reaction['products']) - set(reaction['reactants'])
        controllers = set(reaction['reactants']) & set(reaction['products'])
        # Add glyph for reaction
        process_glyph = sa._process_glyph('process')
        # Connect reactants with arcs
        if not reactants:
            glyph_id = sa._none_glyph()
            sa._arc('consumption', glyph_id, process_glyph)
        else:
            for r in reactants:
                glyph = glyphs.get(r)
                if glyph is None:
                    glyph_id = sa._none_glyph()
                else:
                    glyph_id = glyph.attrib['id']
                sa._arc('consumption', glyph_id, process_glyph)
        # Connect products with arcs
        if not products:
            glyph_id = sa._none_glyph()
            sa._arc('production', process_glyph, glyph_id)
        else:
            for p in products:
                glyph = glyphs.get(p)
                if glyph is None:
                    glyph_id = sa._none_glyph()
                else:
                    glyph_id = glyph.attrib['id']
                sa._arc('production', process_glyph, glyph_id)
        # Connect controllers with arcs
        for c in controllers:
            glyph = glyphs[c]
            sa._arc('catalysis', glyph.attrib['id'], process_glyph)

    sbgn_str = sa.print_model().decode('utf-8')
    return sbgn_str
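
A short usage sketch that writes the resulting SBGN-ML string to disk (model is assumed to be an INDRA-assembled PySB model, as in the tests above):

sbgn_str = export_sbgn(model)
with open('model.sbgn', 'w') as fh:
    fh.write(sbgn_str)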
Example #58
 def __init__(self, model):
     self._model = model
     generate_equations(self._model)