def modeling(pdata, xdata, sdata, tdata):
    X, S, P, t = variables('X, S, P, t')
    k = Parameter('k', 0.1)
    umax = Parameter('umax', min=0.06, max=0.25)
    Ki = Parameter('Ki', min=10, max=80)
    Ks = Parameter('Ks', min=0.5, max=8)
    Kip = Parameter('Kip', min=10, max=17)
    mx = Parameter('mx', min=0.001, max=0.1)
    alpha = Parameter('alpha', min=0.1, max=2.4)
    beta = Parameter('beta', min=0.001, max=1.2)

    X0 = 0.01
    S0 = 50
    P0 = 0.01

    model_dict = {
        D(X, t): umax * S / (Ks + S) * X,
        D(S, t): -umax * S / (Ks + S) * X,
        D(P, t): umax * S / (Ks + S)
    }

    ode_model_monod = ODEModel(
        model_dict,
        initial={t: 0.0, X: X0, S: S0, P: P0}
    )

    fit = Fit(ode_model_monod, t=tdata, X=xdata, S=sdata, P=pdata)
    fit_result = fit.execute()

    return ode_model_monod, fit_result

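# A minimal usage sketch for the modeling() helper above. The arrays below are
# made-up placeholder data, not real measurements; the only assumption is that
# the time vector and the product/biomass/substrate series share a length.
def _modeling_usage_sketch():
    import numpy as np

    t_obs = np.linspace(0, 24, 13)         # time points (hypothetical, hours)
    x_obs = 0.01 * np.exp(0.2 * t_obs)     # biomass X (toy values)
    s_obs = np.maximum(50 - 2 * x_obs, 0)  # substrate S (toy values)
    p_obs = 0.01 + 0.1 * x_obs             # product P (toy values)

    monod_model, monod_result = modeling(p_obs, x_obs, s_obs, t_obs)
    print(monod_result)
    return monod_model, monod_result
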
def setUpClass(cls):
    # First order reaction kinetics. Data taken from
    # http://chem.libretexts.org/Core/Physical_Chemistry/Kinetics/Rate_Laws/The_Rate_Law
    tdata = np.array([
        0, 0.9184, 9.0875, 11.2485, 17.5255, 23.9993, 27.7949, 31.9783,
        35.2118, 42.973, 46.6555, 50.3922, 55.4747, 61.827, 65.6603, 70.0939
    ])
    concentration_A = np.array([
        0.906, 0.8739, 0.5622, 0.5156, 0.3718, 0.2702, 0.2238, 0.1761,
        0.1495, 0.1029, 0.086, 0.0697, 0.0546, 0.0393, 0.0324, 0.026
    ])
    concentration_B = np.max(concentration_A) - concentration_A

    # Define our ODE model
    A, B, t = variables('A, B, t')
    k = Parameter('k')
    model_dict = {D(A, t): -k * A**2, D(B, t): k * A**2}
    model = ODEModel(
        model_dict,
        initial={t: tdata[0], A: concentration_A[0], B: 0}
    )

    cls.guess = interactive_guess.InteractiveGuess(
        model, A=concentration_A, B=concentration_B, t=tdata, n_points=250
    )

def test_single_eval(self):
    """
    Eval an ODEModel at a single value rather than a vector.
    """
    x, y, t = variables('x, y, t')
    k, = parameters('k')  # C is the integration constant.

    # The harmonic oscillator as a system, >1st order is not supported yet.
    harmonic_dict = {
        D(x, t): - k * y,
        D(y, t): k * x,
    }

    # Make a second model to prevent caching of integration results.
    # This also means harmonic_dict should NOT be a Model object.
    harmonic_model_array = ODEModel(harmonic_dict, initial={t: 0.0, x: 1.0, y: 0.0})
    harmonic_model_points = ODEModel(harmonic_dict, initial={t: 0.0, x: 1.0, y: 0.0})

    tdata = np.linspace(-100, 100, 101)
    X, Y = harmonic_model_array(t=tdata, k=0.1)

    # Shuffle the data to prevent using the result at time t to calculate
    # t + dt
    random_order = np.random.permutation(len(tdata))
    for idx in random_order:
        t = tdata[idx]
        X_val = X[idx]
        Y_val = Y[idx]
        X_point, Y_point = harmonic_model_points(t=t, k=0.1)
        self.assertAlmostEqual(X_point[0], X_val)
        self.assertAlmostEqual(Y_point[0], Y_val)

def test_initial_parameters():
    """
    Identical to test_polgar, but with a0 and c0 as Parameters instead of
    plain constants (a0 fixed, c0 free).
    """
    a, b, c, d, t = variables('a, b, c, d, t')
    k, p, l, m = parameters('k, p, l, m')

    a0 = Parameter('a0', min=0, value=10, fixed=True)
    c0 = Parameter('c0', min=0, value=0.1)
    b = a0 - d + a
    model_dict = {
        D(d, t): l * c * b - m * d,
        D(c, t): k * a * b - p * c - l * c * b + m * d,
        D(a, t): - k * a * b + p * c,
    }

    ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, c: c0, d: 0.0})

    # Generate some data
    tdata = np.linspace(0, 3, 1000)
    # Eval
    AA, AAB, BAAB = ode_model(t=tdata, k=0.1, l=0.2, m=.3, p=0.3, a0=10, c0=0)

    fit = Fit(ode_model, t=tdata, a=AA, c=AAB, d=BAAB)
    results = fit.execute()
    print(results)
    assert results.value(a0) == pytest.approx(10, abs=1e-8)
    assert results.value(c0) == pytest.approx(0, abs=1e-8)

    assert ode_model.params == [a0, c0, k, l, m, p]
    assert ode_model.initial_params == [a0, c0]
    assert ode_model.model_params == [a0, k, l, m, p]

def test_simple_kinetics(self):
    """
    Simple kinetics data to test fitting
    """
    tdata = np.array([10, 26, 44, 70, 120])
    adata = 10e-4 * np.array([44, 34, 27, 20, 14])
    a, b, t = variables('a, b, t')
    k, a0 = parameters('k, a0')
    k.value = 0.01
    # a0.value, a0.min, a0.max = 54 * 10e-4, 40e-4, 60e-4
    a0 = 54 * 10e-4

    model_dict = {
        D(a, t): -k * a**2,
        D(b, t): k * a**2,
    }

    ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

    fit = ConstrainedNumericalLeastSquares(ode_model, t=tdata, a=adata, b=None)
    fit_result = fit.execute(tol=1e-9)

    self.assertAlmostEqual(fit_result.value(k), 4.302875e-01, 4)
    self.assertTrue(fit_result.stdev(k) is None)

def test_full_eval_range(self):
    """
    Test if ODEModels can be evaluated at t < t_initial.

    A bit of a no news is good news test.
    """
    tdata = np.array([0, 10, 26, 44, 70, 120])
    adata = 10e-4 * np.array([54, 44, 34, 27, 20, 14])
    a, b, t = variables('a, b, t')
    k, a0 = parameters('k, a0')
    k.value = 0.01
    t0 = tdata[2]
    a0 = adata[2]
    b0 = 0.02729855  # Obtained from evaluating from t=0.

    model_dict = {
        D(a, t): - k * a**2,
        D(b, t): k * a**2,
    }

    ode_model = ODEModel(model_dict, initial={t: t0, a: a0, b: b0})

    fit = Fit(ode_model, t=tdata, a=adata, b=None)
    ode_result = fit.execute()
    self.assertGreater(ode_result.r_squared, 0.95)

    # Now start from a timepoint that is not in the t-array such that it
    # triggers another pathway to be taken in integrating it.
    # Again, no news is good news.
    ode_model = ODEModel(model_dict, initial={t: t0 + 1e-5, a: a0, b: b0})

    fit = Fit(ode_model, t=tdata, a=adata, b=None)
    ode_result = fit.execute()
    self.assertGreater(ode_result.r_squared, 0.95)

def test_simple_kinetics(self):
    """
    Simple kinetics data to test fitting
    """
    tdata = np.array([10, 26, 44, 70, 120])
    adata = 10e-4 * np.array([44, 34, 27, 20, 14])
    a, b, t = variables('a, b, t')
    k, a0 = parameters('k, a0')
    k.value = 0.01
    # a0.value, a0.min, a0.max = 54 * 10e-4, 40e-4, 60e-4
    a0 = 54 * 10e-4

    model_dict = {
        D(a, t): -k * a**2,
        D(b, t): k * a**2,
    }

    ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

    # Generate some data
    tvec = np.linspace(0, 500, 1000)

    fit = NumericalLeastSquares(ode_model, t=tdata, a=adata, b=None)
    fit_result = fit.execute()
    # print(fit_result)
    self.assertAlmostEqual(fit_result.value(k), 4.302875e-01, 4)
    self.assertAlmostEqual(fit_result.stdev(k), 6.447068e-03, 4)

    fit = Fit(ode_model, t=tdata, a=adata, b=None)
    fit_result = fit.execute()
    # print(fit_result)
    self.assertAlmostEqual(fit_result.value(k), 4.302875e-01, 4)
    self.assertTrue(np.isnan(fit_result.stdev(k)))

def test_polgar(self):
    """
    Analysis of data published here:
    This whole ODE support was built to do this analysis in the first place.
    """
    a, b, c, d, t = variables('a, b, c, d, t')
    k, p, l, m = parameters('k, p, l, m')

    a0 = 10
    b = a0 - d + a
    model_dict = {
        D(d, t): l * c * b - m * d,
        D(c, t): k * a * b - p * c - l * c * b + m * d,
        D(a, t): -k * a * b + p * c,
    }

    ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, c: 0.0, d: 0.0})

    # Generate some data
    tdata = np.linspace(0, 3, 1000)
    # Eval
    AA, AAB, BAAB = ode_model(t=tdata, k=0.1, l=0.2, m=.3, p=0.3)

def test_simple_kinetics(self):
    """
    Simple kinetics data to test fitting
    """
    tdata = np.array([10, 26, 44, 70, 120])
    adata = 10e-4 * np.array([44, 34, 27, 20, 14])
    a, b, t = variables('a, b, t')
    k, a0 = parameters('k, a0')
    k.value = 0.01
    # a0.value, a0.min, a0.max = 54 * 10e-4, 40e-4, 60e-4
    a0 = 54 * 10e-4

    model_dict = {
        D(a, t): - k * a**2,
        D(b, t): k * a**2,
    }

    ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

    # Analytical solution
    model = Model({a: 1 / (k * t + 1 / a0)})
    fit = Fit(model, t=tdata, a=adata)
    fit_result = fit.execute()

    fit = Fit(ode_model, t=tdata, a=adata, b=None, minimizer=MINPACK)
    ode_result = fit.execute()
    self.assertAlmostEqual(ode_result.value(k) / fit_result.value(k), 1.0, 4)
    self.assertAlmostEqual(ode_result.stdev(k) / fit_result.stdev(k), 1.0, 4)
    self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)

    fit = Fit(ode_model, t=tdata, a=adata, b=None)
    ode_result = fit.execute()
    self.assertAlmostEqual(ode_result.value(k) / fit_result.value(k), 1.0, 4)
    self.assertAlmostEqual(ode_result.stdev(k) / fit_result.stdev(k), 1.0, 4)
    self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)

def test_van_der_pol():
    """
    http://hplgit.github.io/odespy/doc/pub/tutorial/html/main_odespy.html
    """
    u_0, u_1, t = variables('u_0, u_1, t')

    # Van der Pol oscillator as a first-order system: u_0' = u_1,
    # u_1' = 3 * (1 - u_0**2) * u_1 - u_0 (the form used in the odespy tutorial).
    model_dict = {
        D(u_0, t): u_1,
        D(u_1, t): 3 * (1 - u_0**2) * u_1 - u_0,
    }

    ode_model = ODEModel(model_dict, initial={t: 0.0, u_0: 2.0, u_1: 1.0})

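# A minimal sketch of actually integrating the van der Pol model built above;
# the test itself only constructs it. Assumes the same symfit/numpy imports as
# the surrounding tests; the time grid is illustrative, and since the model has
# no free Parameters, only t is passed when calling it.
def _eval_van_der_pol_sketch():
    import numpy as np

    u_0, u_1, t = variables('u_0, u_1, t')
    model = ODEModel(
        {D(u_0, t): u_1, D(u_1, t): 3 * (1 - u_0**2) * u_1 - u_0},
        initial={t: 0.0, u_0: 2.0, u_1: 1.0},
    )
    tvec = np.linspace(0, 10, 201)
    u0_vals, u1_vals = model(t=tvec)
    return tvec, u0_vals, u1_vals
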
def test_pickle():
    """
    Make sure models can be pickled and are preserved when pickling.
    """
    a, b = parameters('a, b')
    x, y = variables('x, y')
    exact_model = Model({y: a * x ** b})
    constraint = Model.as_constraint(Eq(a, b), exact_model)
    num_model = CallableNumericalModel(
        {y: a * x ** b}, independent_vars=[x], params=[a, b]
    )
    connected_num_model = CallableNumericalModel(
        {y: a * x ** b}, connectivity_mapping={y: {x, a, b}}
    )
    # Test if lsoda args and kwargs are pickled too
    ode_model = ODEModel({D(y, x): a * x + b}, {x: 0.0}, 3, 4, some_kwarg=True)

    models = [exact_model, constraint, num_model, ode_model, connected_num_model]

    for model in models:
        new_model = pickle.loads(pickle.dumps(model))
        # Compare signatures
        assert model.__signature__ == new_model.__signature__
        # Trigger the cached vars because we compare `__dict__` s
        model.vars
        new_model.vars

        # Explicitly make sure the connectivity mapping is identical.
        assert model.connectivity_mapping == new_model.connectivity_mapping
        if not isinstance(model, ODEModel):
            model.function_dict
            model.vars_as_functions
            new_model.function_dict
            new_model.vars_as_functions

        assert model.__dict__ == new_model.__dict__

def test_known_solution(self):
    p, c1 = parameters('p, c1')
    y, t = variables('y, t')
    p.value = 3.0

    model_dict = {
        D(y, t): - p * y,
    }

    # Let's say we know the exact solution to this problem
    sol = Model({y: exp(- p * t)})

    # Generate some data
    tdata = np.linspace(0, 3, 10001)
    ydata = sol(t=tdata, p=3.22)[0]
    ydata += np.random.normal(0, 0.005, ydata.shape)

    ode_model = ODEModel(model_dict, initial={t: 0.0, y: ydata[0]})
    fit = Fit(ode_model, t=tdata, y=ydata)
    ode_result = fit.execute()

    c1.value = ydata[0]
    fit = Fit(sol, t=tdata, y=ydata)
    fit_result = fit.execute()

    self.assertAlmostEqual(ode_result.value(p) / fit_result.value(p), 1, 2)
    self.assertAlmostEqual(ode_result.r_squared / fit_result.r_squared, 1, 4)
    self.assertAlmostEqual(ode_result.stdev(p) / fit_result.stdev(p), 1, 3)

def oneProductModel(kABval=1e-2, conc0=50e-3, tvec=np.linspace(0, 200000, 100)):
    # Here we describe a model with A+B->AB
    A, B, AB, t = variables('A, B, AB, t')

    tdata = [0, 1, 2]

    kAB = Parameter('kAB', kABval)  # Rate constant for formation of AB

    # here's a list of rate expressions for each component in the mixture
    model_dict = {
        D(AB, t): kAB * A * B,
        D(A, t): -kAB * A * B,
        D(B, t): -(kAB * A * B),
    }

    # here we define the ODE model and specify the start concentrations of each reagent
    ode_model = ODEModel(model_dict, initial={
        t: 0.0,
        A: conc0,
        B: conc0,
        AB: 0,
    })

    # and then we fit the ODE model
    fit = Fit(ode_model, t=tdata, A=None, B=None, AB=None)
    fit_result = fit.execute()

    # Generate some data
    ans = ode_model(t=tvec, **fit_result.params)._asdict()

    # and plot it
    plt.plot(tvec, ans[AB], label='[AB]')
    #plt.plot(tvec, BCres, label='[BC]')
    #plt.scatter(tdata, adata)
    plt.ylabel('Conc [M]')
    plt.xlabel('Time [s]')
    plt.legend()
    plt.show()

def test_odemodel_sanity():
    """
    If a user provides an ODE-like model directly to Fit without explicitly
    turning it into an ODEModel, give a warning.
    """
    tdata = np.array([0, 10, 26, 44, 70, 120])
    adata = 10e-4 * np.array([54, 44, 34, 27, 20, 14])
    a, t = variables('a, t')
    k, a0 = parameters('k, a0')

    model_dict = {
        D(a, t): - k * a * t,
    }
    with pytest.raises(RuntimeWarning):
        fit = Fit(model_dict, t=tdata, a=adata)

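# For contrast with the sanity check above, a sketch of the intended usage:
# wrap the derivative dict in an ODEModel (with initial conditions) before
# handing it to Fit. Variable and parameter names mirror the test; the choice
# of initial values here is an illustrative assumption.
def _odemodel_correct_usage_sketch():
    tdata = np.array([0, 10, 26, 44, 70, 120])
    adata = 10e-4 * np.array([54, 44, 34, 27, 20, 14])
    a, t = variables('a, t')
    k, = parameters('k')

    ode_model = ODEModel({D(a, t): - k * a * t},
                         initial={t: tdata[0], a: adata[0]})
    fit = Fit(ode_model, t=tdata, a=adata)
    return fit.execute()
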
def test_simple_kinetics():
    """
    Simple kinetics data to test fitting
    """
    tdata = np.array([10, 26, 44, 70, 120])
    adata = 10e-4 * np.array([44, 34, 27, 20, 14])
    a, b, t = variables('a, b, t')
    k, a0 = parameters('k, a0')
    k.value = 0.01
    # a0.value, a0.min, a0.max = 54 * 10e-4, 40e-4, 60e-4
    a0 = 54 * 10e-4

    model_dict = {
        D(a, t): -k * a**2,
        D(b, t): k * a**2,
    }

    ode_model = ODEModel(model_dict, initial={t: 0.0, a: a0, b: 0.0})

    fit = Fit(ode_model, t=tdata, a=adata, b=None)
    fit_result = fit.execute(tol=1e-9)

    assert fit_result.value(k) == pytest.approx(4.302875e-01, 1e-5)
    assert fit_result.stdev(k) == pytest.approx(6.447068e-03, 1e-5)

def test_neg():
    """
    Test negation of all model types
    """
    x, y_1, y_2 = variables('x, y_1, y_2')
    a, b = parameters('a, b')

    model_dict = {y_2: a * x ** 2, y_1: 2 * x * b}
    model = Model(model_dict)

    model_neg = - model
    for key in model:
        assert model[key] == - model_neg[key]

    # Constraints
    constraint = Model.as_constraint(Eq(a * x, 2), model)

    constraint_neg = - constraint
    # for key in constraint:
    assert constraint[constraint.dependent_vars[0]] == - constraint_neg[constraint_neg.dependent_vars[0]]

    # ODEModel
    odemodel = ODEModel({D(y_1, x): a * x}, initial={a: 1.0})

    odemodel_neg = - odemodel
    for key in odemodel:
        assert odemodel[key] == - odemodel_neg[key]

    # For models with interdependency, negation should only change the
    # dependent components.
    model_dict = {x: y_1**2, y_1: a * y_2 + b}
    model = Model(model_dict)

    model_neg = - model
    for key in model:
        if key in model.dependent_vars:
            assert model[key] == - model_neg[key]
        elif key in model.interdependent_vars:
            assert model[key] == model_neg[key]
        else:
            pytest.fail()

def test_known_solution(self):
    p, c1, c2 = parameters('p, c1, c2')
    y, t = variables('y, t')
    p.value = 3.0

    model_dict = {
        D(y, t): -p * y,
    }

    # Let's say we know the exact solution to this problem
    sol = c1 * exp(-p * t)

    # Generate some data
    tdata = np.linspace(0, 3, 101)
    ydata = sol(t=tdata, c1=1.0, p=3.22)

    ode_model = ODEModel(model_dict, initial={t: 0.0, y: 1.0})
    fit = Fit(ode_model, t=tdata, y=ydata)
    fit_result = fit.execute()

    y_sol, = ode_model(tdata, **fit_result.params)
    self.assertAlmostEqual(3.22, fit_result.value(p))

import numpy as np

from symfit import variables, Parameter, D, ODEModel
from symfit.contrib.interactive_guess import InteractiveGuess

# First order reaction kinetics. Data taken from
# http://chem.libretexts.org/Core/Physical_Chemistry/Kinetics/Rate_Laws/The_Rate_Law
tdata = np.array([
    0, 0.9184, 9.0875, 11.2485, 17.5255, 23.9993, 27.7949, 31.9783,
    35.2118, 42.973, 46.6555, 50.3922, 55.4747, 61.827, 65.6603, 70.0939
])
concentration_A = np.array([
    0.906, 0.8739, 0.5622, 0.5156, 0.3718, 0.2702, 0.2238, 0.1761,
    0.1495, 0.1029, 0.086, 0.0697, 0.0546, 0.0393, 0.0324, 0.026
])
concentration_B = np.max(concentration_A) - concentration_A

# Define our ODE model
A, B, t = variables('A, B, t')
k = Parameter('k')
model_dict = {D(A, t): -k * A**2, D(B, t): k * A**2}
model = ODEModel(model_dict, initial={t: tdata[0], A: concentration_A[0], B: 0})

guess = InteractiveGuess(model, A=concentration_A, B=concentration_B,
                         t=tdata, n_points=250)
guess.execute()
print(guess)

def test_interdependency():
    a, b = parameters('a, b')
    x, y, z = variables('x, y, z')
    model_dict = {
        y: a**3 * x + b**2,
        z: y**2 + a * b
    }
    callable_model = CallableModel(model_dict)
    assert callable_model.independent_vars == [x]
    assert callable_model.interdependent_vars == [y]
    assert callable_model.dependent_vars == [z]
    assert callable_model.params == [a, b]
    assert callable_model.connectivity_mapping == {y: {a, b, x}, z: {a, b, y}}
    assert callable_model(x=3, a=1, b=2) == pytest.approx(np.atleast_2d([7, 51]).T)
    # Check that the arguments of each generated function match the
    # connectivity mapping of the corresponding variable.
    for var, func in callable_model.vars_as_functions.items():
        str_con_map = set(x.name for x in callable_model.connectivity_mapping[var])
        str_args = set(str(x.__class__) if isinstance(x, Function) else x.name
                       for x in func.args)
        assert str_con_map == str_args

    jac_model = jacobian_from_model(callable_model)
    assert jac_model.params == [a, b]
    assert jac_model.dependent_vars == [D(z, a), D(z, b), z]
    assert jac_model.interdependent_vars == [D(y, a), D(y, b), y]
    assert jac_model.independent_vars == [x]

    for p1, p2 in zip_longest(jac_model.__signature__.parameters, [x, a, b]):
        assert str(p1) == str(p2)

    # The connectivity of jac_model should be that from its own components
    # plus that of the model. The latter is needed to properly compute the
    # Hessian.
    jac_con_map = {D(y, a): {a, x},
                   D(y, b): {b},
                   D(z, a): {b, y, D(y, a)},
                   D(z, b): {a, y, D(y, b)},
                   y: {a, b, x}, z: {a, b, y}}
    assert jac_model.connectivity_mapping == jac_con_map
    jac_model_dict = {D(y, a): 3 * a**2 * x,
                      D(y, b): 2 * b,
                      D(z, a): b + 2 * y * D(y, a),
                      D(z, b): a + 2 * y * D(y, b),
                      y: callable_model[y], z: callable_model[z]}
    assert jac_model.model_dict == jac_model_dict
    for var, func in jac_model.vars_as_functions.items():
        str_con_map = set(x.name for x in jac_model.connectivity_mapping[var])
        str_args = set(str(x.__class__) if isinstance(x, Function) else x.name
                       for x in func.args)
        assert str_con_map == str_args

    hess_model = hessian_from_model(callable_model)
    # Result according to Mathematica
    hess_as_dict = {
        D(y, (a, 2)): 6 * a * x,
        D(y, a, b): 0,
        D(y, b, a): 0,
        D(y, (b, 2)): 2,
        D(z, (a, 2)): 2 * D(y, a)**2 + 2 * y * D(y, (a, 2)),
        D(z, a, b): 1 + 2 * D(y, b) * D(y, a) + 2 * y * D(y, a, b),
        D(z, b, a): 1 + 2 * D(y, b) * D(y, a) + 2 * y * D(y, a, b),
        D(z, (b, 2)): 2 * D(y, b)**2 + 2 * y * D(y, (b, 2)),
        D(y, a): 3 * a ** 2 * x,
        D(y, b): 2 * b,
        D(z, a): b + 2 * y * D(y, a),
        D(z, b): a + 2 * y * D(y, b),
        y: callable_model[y], z: callable_model[z]
    }
    assert dict(hess_model) == hess_as_dict
    assert hess_model.params == [a, b]
    assert hess_model.dependent_vars == [D(z, (a, 2)), D(z, a, b), D(z, (b, 2)),
                                         D(z, b, a), D(z, a), D(z, b), z]
    assert hess_model.interdependent_vars == [D(y, (a, 2)), D(y, a), D(y, b), y]
    assert hess_model.independent_vars == [x]

    model = Model(model_dict)
    assert model(x=3, a=1, b=2) == pytest.approx(np.atleast_2d([7, 51]).T)
    assert model.eval_jacobian(x=3, a=1, b=2) == pytest.approx(
        np.array([[[9], [4]], [[128], [57]]]))
    assert model.eval_hessian(x=3, a=1, b=2) == pytest.approx(
        np.array([[[[18], [0]], [[0], [2]]], [[[414], [73]], [[73], [60]]]]))

    assert model.__signature__ == model.jacobian_model.__signature__
    assert model.__signature__ == model.hessian_model.__signature__

import numpy as np

# symfit imports; in the (older) symfit version this example targets,
# NumericalLeastSquares is available from the top-level API.
from symfit import variables, Parameter, D, ODEModel, NumericalLeastSquares
from symfit.contrib.interactive_guess import InteractiveGuess2D

# First order reaction kinetics. Data taken from
# http://chem.libretexts.org/Core/Physical_Chemistry/Kinetics/Rate_Laws/The_Rate_Law
tdata = np.array([0, 0.9184, 9.0875, 11.2485, 17.5255, 23.9993, 27.7949,
                  31.9783, 35.2118, 42.973, 46.6555, 50.3922, 55.4747,
                  61.827, 65.6603, 70.0939])
concentration_A = np.array([0.906, 0.8739, 0.5622, 0.5156, 0.3718, 0.2702,
                            0.2238, 0.1761, 0.1495, 0.1029, 0.086, 0.0697,
                            0.0546, 0.0393, 0.0324, 0.026])
concentration_B = np.max(concentration_A) - concentration_A

# Define our ODE model
A, B, t = variables('A, B, t')
k = Parameter()
model_dict = {
    D(A, t): - k * A**2,
    D(B, t): k * A**2
}
model = ODEModel(model_dict, initial={t: tdata[0], A: concentration_A[0], B: 0})

guess = InteractiveGuess2D(model, A=concentration_A, B=concentration_B,
                           t=tdata, n_points=250)
guess.execute()
print(guess)

fit = NumericalLeastSquares(model, A=concentration_A, B=concentration_B, t=tdata)
fit_result = fit.execute()
print(fit_result)

def twoProductModel(kABval=1e-2, kBCval=1e-2, conc0=50e-3,
                    tvec=np.linspace(0, 200000, 100)):
    # conc0 is initial concentration
    tdata = [0, 1, 2]

    # Here we describe a model with A+B->AB and B+C->BC
    A, B, C, AB, BC, t = variables('A, B, C, AB, BC, t')

    kAB = Parameter('kAB', kABval)  # Rate constant for formation of AB
    kBC = Parameter('kBC', kBCval)  # rate constant for formation of BC

    # here's a list of rate expressions for each component in the mixture
    model_dict = {
        D(AB, t): kAB * A * B,
        D(BC, t): kBC * B * C,
        D(A, t): -kAB * A * B,
        D(B, t): -(kAB * A * B + kBC * B * C),
        D(C, t): -kBC * B * C,
    }

    # here we define the ODE model and specify the start concentrations of each reagent
    ode_model = ODEModel(model_dict, initial={
        t: 0.0,
        A: conc0,
        B: conc0,
        C: conc0,
        AB: 0,
        BC: 0
    })

    # and then we fit the ODE model
    fit = Fit(ode_model, t=tdata, A=None, B=None, AB=None, BC=None, C=None)
    fit_result = fit.execute()

    # Generate some data
    ans = ode_model(t=tvec, **fit_result.params)._asdict()

    # and plot it
    plt.plot(tvec, ans[AB], label='[AB]')
    plt.plot(tvec, ans[BC], label='[BC]')
    #plt.scatter(tdata, adata)
    plt.ylabel('Conc [M]')
    plt.xlabel('Time [s]')
    plt.legend()
    plt.show()

    res = [ans[AB][-1], ans[BC][-1]]
    resNorm = res / sum(res)
    plt.bar([1, 2], 100 * resNorm)
    plt.xticks([1, 2], ('[AB]', '[BC]'))
    plt.ylabel('%age at eq')
    plt.show()

    # enhancement, in percent, compared to equal concentrations everywhere
    resEnh = 100 * ((np.array(resNorm)) - 1 / len(resNorm)) / (1 / len(resNorm))
    # rounding errors can give a spurious difference: set small values to zero
    resEnh[abs(resEnh) < 1e-5] = 0

    if (sum(abs(resEnh)) > 0):
        plt.bar([1, 2], resEnh)
        plt.xticks([1, 2], ('[AB]', '[BC]'))
        plt.ylabel('%age at eq')
        plt.title('Enhancement / %')
        plt.show()
    else:
        print("No enhancement compared to equal rates")

def square(kABval=1e-2, kACval=1e-2, kBDval=1e-2, kCDval=1e-2, kBCval=1e-2,
           kADval=1e-2, conc0=50e-3, tvec=np.linspace(0, 200000, 100)):
    # Here we describe a model with A+B->AB
    A, B, C, Di, AB, AC, CD, BD, AD, BC, t = variables(
        'A, B, C, Di, AB, AC, CD, BD, AD, BC, t')

    tdata = [0, 1, 2, 100, 1000, 10000]

    kAB = Parameter('kAB', kABval)  # Rate constant for formation of AB
    kAC = Parameter('kAC', kACval)  # Rate constant for formation of AC
    kBD = Parameter('kBD', kBDval)  # Rate constant for formation of BD
    kCD = Parameter('kCD', kCDval)  # Rate constant for formation of CD
    kBC = Parameter('kBC', kBCval)  # Rate constant for formation of BC  ## cross-connection
    kAD = Parameter('kAD', kADval)  # Rate constant for formation of AD  ## cross-connection

    # here's a list of rate expressions for each component in the mixture
    # here I'm calling the concentration of D as 'Di' to avoid confusion
    model_dict = {
        D(AB, t): kAB * A * B,
        D(AC, t): kAC * A * C,
        D(BD, t): kBD * B * Di,
        D(CD, t): kCD * C * Di,
        D(BC, t): kBC * B * C,
        D(AD, t): kAD * A * Di,
        D(A, t): -(kAB * A * B + kAC * A * C + kAD * A * Di),
        D(B, t): -(kAB * A * B + kBD * B * Di + kBC * B * C),
        D(C, t): -(kAC * A * C + kCD * C * Di + kBC * B * C),
        D(Di, t): -(kBD * B * Di + kCD * C * Di + kAD * A * Di),
    }

    # here we define the ODE model and specify the start concentrations of each reagent
    ode_model = ODEModel(model_dict, initial={
        t: 0.0,
        A: conc0,
        B: conc0,
        C: conc0,
        Di: conc0,
        AB: 0,
        BC: 0,
        AC: 0,
        BD: 0,
        CD: 0,
        AD: 0
    })

    # and then we fit the ODE model
    fit = Fit(ode_model, t=tdata, A=None, B=None, C=None, Di=None, AB=None,
              BC=None, AC=None, BD=None, CD=None, AD=None)
    fit_result = fit.execute()

    # Generate some data
    ans = ode_model(t=tvec, **fit_result.params)._asdict()

    # and plot it
    plt.plot(tvec, ans[AB], label='[AB]')
    plt.plot(tvec, ans[AC], label='[AC]')
    plt.plot(tvec, ans[CD], label='[CD]')
    plt.plot(tvec, ans[BC], label='[BC]')
    plt.plot(tvec, ans[BD], label='[BD]')
    plt.plot(tvec, ans[AD], label='[AD]')
    #plt.plot(tvec, BCres, label='[BC]')
    #plt.scatter(tdata, adata)
    plt.ylabel('Conc [M]')
    plt.xlabel('Time [s]')
    plt.legend()
    plt.show()

    res = [ans[AB][-1], ans[AC][-1], ans[CD][-1],
           ans[BC][-1], ans[BD][-1], ans[AD][-1]]
    resNorm = res / sum(res)
    plt.bar([1, 2, 3, 4, 5, 6], 100 * resNorm)
    plt.xticks([1, 2, 3, 4, 5, 6],
               ('[AB]', '[AC]', '[CD]', '[BC]', '[BD]', '[AD]'))
    plt.ylabel('%age at eq')
    plt.show()

    # enhancement, in percent, compared to equal concentrations everywhere
    resEnh = 100 * ((np.array(resNorm)) - 1 / len(resNorm)) / (1 / len(resNorm))
    # rounding errors can give a spurious difference: set small values to zero
    resEnh[abs(resEnh) < 1e-5] = 0

    if (sum(abs(resEnh)) > 0):
        yval = [1, 2, 3, 4, 5, 6]
        plt.bar(yval, resEnh)
        plt.xticks(yval, ('[AB]', '[AC]', '[CD]', '[BC]', '[BD]', '[AD]'))
        plt.ylabel('%age at eq')
        plt.title('Enhancement / %')
        plt.show()
    else:
        print("No enhancement compared to equal rates")

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT

import numpy as np

from symfit import variables, Parameter, Fit, D, ODEModel
from symfit.contrib.interactive_guess import InteractiveGuess

# First order reaction kinetics. Data taken from
# http://chem.libretexts.org/Core/Physical_Chemistry/Kinetics/Rate_Laws/The_Rate_Law
tdata = np.array([0, 0.9184, 9.0875, 11.2485, 17.5255, 23.9993, 27.7949,
                  31.9783, 35.2118, 42.973, 46.6555, 50.3922, 55.4747,
                  61.827, 65.6603, 70.0939])
concentration = np.array([0.906, 0.8739, 0.5622, 0.5156, 0.3718, 0.2702,
                          0.2238, 0.1761, 0.1495, 0.1029, 0.086, 0.0697,
                          0.0546, 0.0393, 0.0324, 0.026])

# Define our ODE model
A, t = variables('A, t')
k = Parameter('k')
model = ODEModel({D(A, t): - k * A}, initial={t: tdata[0], A: concentration[0]})

guess = InteractiveGuess(model, A=concentration, t=tdata, n_points=250)
guess.execute()
print(guess)

fit = Fit(model, A=concentration, t=tdata)
fit_result = fit.execute()
print(fit_result)

import numpy as np

from symfit import variables, Parameter, Fit, D, ODEModel

# First order reaction kinetics. Data taken from
# http://chem.libretexts.org/Core/Physical_Chemistry/Kinetics/Rate_Laws/The_Rate_Law
tdata = np.array([
    0, 0.9184, 9.0875, 11.2485, 17.5255, 23.9993, 27.7949, 31.9783,
    35.2118, 42.973, 46.6555, 50.3922, 55.4747, 61.827, 65.6603, 70.0939
])
concentration = np.array([
    0.906, 0.8739, 0.5622, 0.5156, 0.3718, 0.2702, 0.2238, 0.1761,
    0.1495, 0.1029, 0.086, 0.0697, 0.0546, 0.0393, 0.0324, 0.026
])

# Define our ODE model
A, B, t = variables('A, B, t')
k = Parameter('k')

model = ODEModel(
    {D(A, t): -k * A, D(B, t): k * A},
    initial={t: tdata[0], A: concentration[0], B: 0.0}
)

fit = Fit(model, A=concentration, t=tdata)
fit_result = fit.execute()
print(fit_result)

# Plotting, irrelevant to the symfit part.
t_axis = np.linspace(0, 80)

def generalModel(rates, conc0, tvec=np.linspace(0, 200000, 100)):
    # make a list of parameters
    numEl = np.shape(rates)[0]
    model_dict = {}
    pp = ()
    vv = ()

    # first create variables for initial species
    for ii in np.arange(0, numEl):
        var = Variable(chr(ii + 65))
        vv = vv + (var, )
    t = Variable('t')

    kdict = {}
    # then create variables for products and rate constants (parameters)
    ik = 0
    for ii in np.arange(0, numEl):
        for ij in np.arange(0, numEl):
            if ii < ij:
                var = Variable(chr(ii + 65) + chr(ij + 65))
                vv = vv + (var, )
                par = Parameter('k' + chr(ii + 65) + chr(ij + 65), rates[ii, ij])
                pp = pp + (par, )
                # a dict so we can easily find rate constant indices later
                # (this is hacky but if it works...)
                kdict[str(ii) + str(ij)] = ik
                kdict[str(ij) + str(ii)] = ik
                ik = ik + 1

    # now create model
    ik = 0
    for ii in np.arange(0, numEl):
        # this will be an expression for what's happening to the SM concentration.
        # It's easiest if we just add each relevant product forming reaction to
        # this expression, then take its negative later on
        smexpr = 0
        for ij in np.arange(0, numEl):
            if ii < ij:
                model_dict[D(vv[numEl + ik], t)] = \
                    pp[kdict[str(ii) + str(ij)]] * vv[ii] * vv[ij]
                smexpr = smexpr + pp[kdict[str(ii) + str(ij)]] * vv[ii] * vv[ij]
                ik = ik + 1
            elif ii > ij:
                # need to have this otherwise we miss a lot of contributions for B/C/D
                smexpr = smexpr + pp[kdict[str(ij) + str(ii)]] * vv[ij] * vv[ii]
        # we're still in the loop here, at the level of starting materials.
        # This part creates d[A]/dt (etc for B, C, D)
        model_dict[D(vv[ii], t)] = -(smexpr)

    # set initial parameters: at time 0, all concentrations of products are zero
    # and concentration of SMs is fixed (this could be changed to allow variable concs TODO)
    # while we're here, also set the arguments for the fit command later on to None.
    initial = {
        t: 0.0,
    }
    fitargs = {}
    for el in vv:
        if len(el.name) == 1:
            initial[el] = conc0
        else:
            initial[el] = 0
        fitargs[el.name] = None

    # define the model
    ode_model = ODEModel(model_dict, initial=initial)

    # honestly I don't know what this does but it seems to have no effect on results
    # (based on my incomplete testing!) - it just needs to be there and not 'None'
    tdata = [0, 1, 2]

    # and then we fit the ODE model
    fit = Fit(ode_model, **fitargs, t=tdata)
    fit_result = fit.execute()

    # Generate some data from our fit model
    ans = ode_model(t=tvec, **fit_result.params)._asdict()

    # and plot it
    result = []
    legtxt = ()
    for ii in np.arange(numEl, len(vv), 1):
        plt.plot(tvec, ans[vv[ii]], label=vv[ii].name)
        result.append(ans[vv[ii]][-1])
        legtxt = legtxt + (vv[ii].name, )
    plt.ylabel('Conc [M]')
    plt.xlabel('Time [s]')
    plt.legend()
    plt.show()

    resNorm = result / sum(result)
    xpos = np.arange(1, len(resNorm) + 1, 1)
    plt.bar(xpos, 100 * resNorm)
    plt.xticks(xpos, legtxt)
    plt.ylabel('%age at eq')
    plt.show()

    # enhancement, in percent, compared to equal concentrations everywhere
    resEnh = 100 * ((np.array(resNorm)) - 1 / len(resNorm)) / (1 / len(resNorm))
    # rounding errors can give a spurious difference: set small values to zero
    resEnh[abs(resEnh) < 1e-5] = 0

    if (sum(abs(resEnh)) > 0):
        plt.bar(xpos, resEnh)
        plt.xticks(xpos, legtxt)
        plt.ylabel('%age at eq')
        plt.title('Enhancement / %')
        plt.show()
    else:
        print("No enhancement compared to equal rates in a fully-connected network")

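# A minimal usage sketch for generalModel() above. The rates matrix is a
# made-up example; only its upper triangle (ii < ij) is read, one entry per
# pairwise product, and its size sets the number of starting materials.
def _general_model_usage_sketch():
    import numpy as np

    rates = np.array([[0.0, 1e-2, 2e-2],
                      [1e-2, 0.0, 5e-3],
                      [2e-2, 5e-3, 0.0]])  # 3 species A, B, C -> AB, AC, BC
    generalModel(rates, conc0=50e-3, tvec=np.linspace(0, 200000, 100))
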
from symfit import variables, parameters, Fit, D, ODEModel
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Example of the ease of use of the symfit ODE integration syntax.
a, b, c, d, t = variables('a, b, c, d, t')
k, p, l, m = parameters('k, p, l, m')

a0 = 10
b = a0 - d + a  # [B] is not independent.

model_dict = {
    D(d, t): l * c * b - m * d,
    D(c, t): k * a * b - p * c - l * c * b + m * d,
    D(a, t): -k * a * b + p * c,
}

model = ODEModel(model_dict, initial={t: 0.0, a: a0, c: 0.0, d: 0.0})

# Generate some data
tdata = np.linspace(0, 3, 1000)
# Eval the normal way.
AA, AAB, BAAB = model(t=tdata, k=0.1, l=0.2, m=.3, p=0.3)

plt.plot(tdata, AA, color='red', label='[AA]')
plt.plot(tdata, AAB, color='blue', label='[AAB]')
plt.plot(tdata, BAAB, color='green', label='[BAAB]')
plt.plot(tdata, b(d=BAAB, a=AA), color='pink', label='[B]')
# plt.plot(tdata, AA + AAB + BAAB, color='black', label='total')
plt.legend()
