def test_variables_samples(achr: ACHRSampler) -> None:
    """Test variable samples."""
    variable_names = np.array([variable.name for variable in achr.model.variables])
    sample_frame = achr.sample(10, fluxes=False)
    # One column per warmup dimension, one row per requested sample.
    n_variables = achr.warmup.shape[1]
    assert sample_frame.shape == (10, n_variables)
    assert (sample_frame.columns == variable_names).all()
    # Every drawn point must validate as a feasible vertex sample ("v").
    assert (achr.validate(sample_frame) == "v").all()
def test_validate_wrong_sample(achr: ACHRSampler, model: "Model") -> None:
    """Test sample correctness."""
    sample_frame = achr.sample(10)
    # Injecting an extra, unknown column must make validation fail.
    sample_frame["hello"] = 1
    with pytest.raises(ValueError):
        achr.validate(sample_frame)
def achr(model: "Model") -> ACHRSampler:
    """Return ACHRSampler instance for tests."""
    sampler = ACHRSampler(model, thinning=1)
    # Warmup point count is positive and bounded by twice the variable count.
    upper_bound = 2 * len(model.variables)
    assert 0 < sampler.n_warmup <= upper_bound
    # All warmup points must be valid ("v") samples.
    assert (sampler.validate(sampler.warmup) == "v").all()
    return sampler
def achr(model: "Model") -> ACHRSampler:
    """Return ACHRSampler instance for tests.

    Parameters
    ----------
    model : cobra.Model
        The model to build the sampler from.

    Returns
    -------
    ACHRSampler
        A sampler with thinning disabled (thinning=1) for fast tests.
    """
    sampler = ACHRSampler(model, thinning=1)
    # Warmup point count is positive and bounded by twice the variable count.
    assert (sampler.n_warmup > 0) and (sampler.n_warmup <= 2 * len(model.variables))
    # All warmup points must validate as feasible ("v") samples.
    assert all(sampler.validate(sampler.warmup) == "v")
    return sampler
def test_complicated_model():
    """Test a complicated model.

    Difficult model since the online mean calculation is numerically unstable
    so many samples weakly violate the equality constraints.
    """
    model = Model('flux_split')
    metabolite_a = Metabolite('A')
    rxn_one = Reaction('V1')
    rxn_two = Reaction('V2')
    rxn_three = Reaction('V3')
    rxn_one.bounds = (0, 6)
    rxn_two.bounds = (0, 8)
    rxn_three.bounds = (0, 10)
    # V1 and V2 consume A; V3 produces it (a flux-split topology).
    rxn_one.add_metabolites({metabolite_a: -1})
    rxn_two.add_metabolites({metabolite_a: -1})
    rxn_three.add_metabolites({metabolite_a: 1})
    model.add_reactions([rxn_one, rxn_two, rxn_three])

    optgp = OptGPSampler(model, 1, seed=42)
    achr = ACHRSampler(model, seed=42)
    optgp_samples = optgp.sample(100)
    achr_samples = achr.sample(100)

    # Fluxes must not be perfectly correlated across the whole sample.
    assert any(optgp_samples.corr().abs() < 1.0)
    assert any(achr_samples.corr().abs() < 1.0)
    # > 95% are valid
    assert sum(optgp.validate(optgp_samples) == "v") > 95
    assert sum(achr.validate(achr_samples) == "v") > 95
def test_achr_init_benchmark(model: "Model", benchmark: Callable) -> None:
    """Benchmark initial ACHR sampling.

    Parameters
    ----------
    model : cobra.Model
        The model to sample from.
    benchmark : Callable
        The pytest-benchmark fixture; times sampler construction.
    """
    # Construction includes warmup-point generation, which is the costly part.
    benchmark(lambda: ACHRSampler(model))
import statsmodels from scipy.stats import sem, t from scipy import mean from statsmodels.sandbox.stats.multicomp import multipletests import seaborn as sns from scipy.stats import hypergeom modelIN = cobra.io.load_matlab_model('HumanGEMNHBESARS_cons.mat') modelUIN = cobra.io.load_matlab_model('HumanGEMNHBEMock_cons.mat') modelIB = cobra.io.load_matlab_model('HumanGEMBiopsySARS.mat') modelUIB = cobra.io.load_matlab_model('HumanGEMBiopsyMock.mat') modelIN.reactions.EX_sarscov2s.bounds = (-0.1259, 1000) modelUIN.reactions.HMR_10024.bounds = (-0.0088, 1000) achr_IN = ACHRSampler(modelIN, thinning=100) achr_UIN = ACHRSampler(modelUIN, thinning=100) achr_IB = ACHRSampler(modelIB, thinning=100) achr_UIB = ACHRSampler(modelUIB, thinning=100) samples_IB = achr_IB.sample(10000) samples_UIB = achr_UIB.sample(10000) samples_IN = achr_IN.sample(10000) samples_UIN = achr_UIN.sample(10000) #KS Test def bootstrapCI(rxn): bsci = [] for i in range(1000):
def test_batch_sampling(achr: ACHRSampler) -> None:
    """Test batch sampling."""
    # Draw 4 batches of 5 samples each; every batch must be fully valid.
    for batch in achr.batch(5, 4):
        assert (achr.validate(batch) == "v").all()
def test_sampling(achr: ACHRSampler) -> None:
    """Test sampling."""
    sample_frame = achr.sample(10)
    # Each drawn point must be flagged as valid ("v") by the sampler.
    validity = achr.validate(sample_frame)
    assert (validity == "v").all()
def test_achr_init_benchmark(model: "Model", benchmark: Callable) -> None:
    """Benchmark initial ACHR sampling."""

    def _construct_sampler() -> ACHRSampler:
        # Construction includes warmup generation — the part being timed.
        return ACHRSampler(model)

    benchmark(_construct_sampler)