Example #1
def capitulo_7():
    from cobra.test import create_test_model
    from cobra.flux_analysis import sample

    file = open("resultados_capitulo_7.txt", "w")
    model = create_test_model("textbook")
    s = sample(model, 100)
    print(s.head())
    # sample 1000 points with one vs. two worker processes
    print("One process:")
    s = sample(model, 1000)
    print("Two processes:")
    s = sample(model, 1000, processes=2)
    s = sample(model, 100, method="achr")  # use the ACHR sampler instead of the default OptGP
    from cobra.flux_analysis.sampling import OptGPSampler, ACHRSampler
    achr = ACHRSampler(model, thinning=10)
    optgp = OptGPSampler(model, processes=4)
    s1 = achr.sample(100)
    s2 = optgp.sample(100)
    import numpy as np
    bad = np.random.uniform(-1000, 1000, size=len(model.reactions))
    achr.validate(np.atleast_2d(bad))  # a random point should not be feasible
    achr.validate(s1)  # points drawn by the sampler should validate
    counts = [
        np.mean(s.Biomass_Ecoli_core > 0.1) for s in optgp.batch(100, 10)
    ]
    file.write("Usually {:.2f}% +- {:.2f}% grow...".format(
        np.mean(counts) * 100.0,
        np.std(counts) * 100.0))
    file.write("\n")
    co = model.problem.Constraint(
        model.reactions.Biomass_Ecoli_core.flux_expression, lb=0.1)
    model.add_cons_vars([co])
    s = sample(model, 10)
    file.write(str(s.Biomass_Ecoli_core))
    file.write("\n")
    file.close()
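# A minimal, hypothetical driver for the example above (not part of the
# original snippet): run it and print the results file it writes.
if __name__ == "__main__":
    capitulo_7()
    with open("resultados_capitulo_7.txt") as fh:
        print(fh.read())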
Example #2
def fluxSampling():
    from cobra.test import create_test_model
    from cobra.flux_analysis import sample
    model = create_test_model("textbook")
    s = sample(model, 100)  # 100 = number of samples to generate
    print(s.head())
    s = sample(model, 1000)
    print(s)

    # The sampling process can be controlled on a lower level by using the
    # sampler classes directly.
    from cobra.flux_analysis.sampling import OptGPSampler, ACHRSampler

    # "Thinning" means only recording samples every n iterations. A higher
    # thinning factor means less correlated samples but also larger
    # computation times.
    achr = ACHRSampler(model, thinning=10)

    # OptGPSampler takes an additional `processes` argument specifying how
    # many processes are used to create parallel sampling chains.
    optgp = OptGPSampler(model, processes=4)

    # For OptGPSampler the number of samples should be a multiple of the number
    # of processes; otherwise it is increased to the nearest multiple
    # automatically (see the small check after the sampling calls below).
    s1 = achr.sample(100)
    s2 = optgp.sample(100)
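    # Sketch of the rounding rule described above (an assumption based on the
    # wording, not verified here): with processes=4, asking for 90 samples
    # should be bumped up to the nearest multiple of 4, i.e. 92.
    s3 = optgp.sample(90)
    assert len(s3) % 4 == 0  # 4 = the `processes` value passed above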

    # Sampling and validation
    import numpy as np
    bad = np.random.uniform(-1000, 1000, size=len(model.reactions))
    achr.validate(np.atleast_2d(bad))  # should not be feasible
    achr.validate(s1)
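    # Hedged illustration (based on the cobrapy docs): validate() returns one
    # short status code per point, with "v" marking a valid one; count how many
    # of the ACHR samples pass.
    codes = achr.validate(s1)
    print("valid samples:", int(np.sum(codes == "v")), "of", len(codes))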

    # Batch sampling
    counts = [
        np.mean(s.Biomass_Ecoli_core > 0.1) for s in optgp.batch(100, 10)
    ]
    print("Usually {:.2f}% +- {:.2f}% grow...".format(
        np.mean(counts) * 100.0,
        np.std(counts) * 100.0))
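    # For reference: optgp.batch(100, 10) above yields 10 chunks of 100 samples
    # each, so `counts` holds 10 growth fractions summarized by the print above.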

    # Adding constraints
    co = model.problem.Constraint(
        model.reactions.Biomass_Ecoli_core.flux_expression, lb=0.1)
    model.add_cons_vars([co])

    # Note that this is only for demonstration purposes; usually you would set
    # the lower bound of the reaction directly instead of creating a new
    # constraint (see the sketch at the end of this example).
    s = sample(model, 10)
    print(s.Biomass_Ecoli_core)
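    # The simpler alternative mentioned above (a sketch, not part of the
    # original example): constrain growth through the reaction bound directly
    # and sample again.
    model.reactions.Biomass_Ecoli_core.lower_bound = 0.1
    s_direct = sample(model, 10)
    print(s_direct.Biomass_Ecoli_core)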