def test_sens_limits():
    """Check that the steady-state sensitivities returned by the forward
    model agree with the analytic sensitivities of the current limit at
    the holding potential."""
    par = Params()
    p, y, v = CreateSymbols(par)
    erev = par.Erev
    para = np.array([
        2.07, 7.17E1, 3.44E-2, 6.18E1, 4.18E2, 2.58E1, 4.75E1, 2.51E1, 3.33E1
    ]) * 1E-3

    # Transition rates of the four-state model
    k1 = p[0] * se.exp(p[1] * v)
    k2 = p[2] * se.exp(-p[3] * v)
    k3 = p[4] * se.exp(p[5] * v)
    k4 = p[6] * se.exp(-p[7] * v)

    def limit_expression():
        # Steady-state current at the holding potential (HH-equation result)
        expr = (p[-1] * (par.holding_potential - erev)
                * k1 / (k1 + k2) * k4 / (k3 + k4))
        return expr.subs(v, par.holding_potential)

    def analytic_sensitivities(expr):
        # d(expr)/dp_j evaluated at the true parameter values
        return [
            float(se.diff(expr, p[j]).subs(p, para).evalf())
            for j in range(0, par.n_params)
        ]

    current_limit = limit_expression()
    print("{} Current limit computed as {}".format(
        __file__, current_limit.subs(p, para).evalf()))
    sens_inf = analytic_sensitivities(current_limit)
    print("{} sens_inf calculated as {}".format(__file__, sens_inf))

    k = se.symbols('k1, k2, k3, k4')

    # Notation is consistent between the two papers
    A = se.Matrix([[-k1 - k3 - k4, k2 - k4, -k4],
                   [k1, -k2 - k3, k4],
                   [-k1, k3 - k1, -k2 - k4 - k1]])
    B = se.Matrix([k4, 0, k1])

    # Use results from HH equations
    current_limit = limit_expression()
    funcs = GetSensitivityEquations(par, p, y, v, A, B, para, [0])
    sens_inf = analytic_sensitivities(current_limit)

    sens = funcs.SimulateForwardModelSensitivities(para)[1]

    # The simulated sensitivities should equal sens_inf to tight tolerance
    equal = np.all(np.abs(sens_inf - sens) < 1e-10)
    assert equal
    return
def test_conv11():
    """Derivatives of applied functions convert faithfully between sympy
    and symengine, and distinct derivatives remain distinct."""
    sx, sy = sympy.Symbol("x"), sympy.Symbol("y")
    ex, ey = Symbol("x"), Symbol("y")
    sf = sympy.Function("f")
    ef = Function("f")

    d_sympy = diff(sf(2 * sx, sy), sx)
    d_dx = diff(ef(2 * ex, ey), ex)
    d_dy = diff(ef(2 * ex, ey), ey)

    # sympy -> symengine conversion
    assert sympify(d_sympy) == d_dx
    assert sympify(d_sympy) != d_dy
    # symengine -> sympy conversion
    assert d_dx._sympy_() == d_sympy
    assert d_dy._sympy_() != d_sympy
def solve_chi_saddlepoint(mu, Sigma):
    """Compute the saddlepoint approximation for the generalized chi
    square distribution given a mean and a covariance matrix.

    Currently has two different ways of solving:
    1. If the mean is close to zero, the system can be solved symbolically.

    :arg mu: mean vector of the underlying Gaussian.
    :arg Sigma: covariance matrix of the underlying Gaussian.
    :returns: callable pdf approximation, numerically normalized on [0, inf).
    """
    eigenvalues, eigenvectors = np.linalg.eig(Sigma)
    # Rotate into the eigenbasis unless Sigma is already diagonal
    # (identity rotation in that case).
    if (eigenvectors == np.diag(eigenvalues)).all():
        P = np.eye(len(mu))
    else:
        P = eigenvectors.T
    Sigma_12 = np.linalg.cholesky(Sigma)
    b = P @ Sigma_12 @ mu

    x = sym.Symbol("x")
    t = sym.Symbol("t")

    # Cumulant generating function K(t) of the generalized chi-square
    K = 0
    for i, l in enumerate(eigenvalues):
        K += (t * b[i]**2 * l) / (1 - 2 * t * l) - 1 / 2 * sym.log(1 - 2 * l * t)
    Kp = sym.diff(K, t).simplify()
    Kpp = sym.diff(K, t, t).simplify()

    # Solve K'(t) = x for the saddlepoint t = s_hat
    roots = sym.lib.symengine_wrapper.solve(sym.Eq(Kp, x), t).args
    # BUGFIX: previously, when len(roots) > 1 and no candidate satisfied
    # Kpp >= 0, s_hat was left unbound (UnboundLocalError).  Default to the
    # first root and let a candidate with non-negative K'' override it.
    s_hat = roots[0]
    if len(roots) > 1:
        for expr in roots:
            trial = Kpp.subs(t, expr).subs(x, np.dot(b, b))
            if trial >= 0.0:
                s_hat = expr

    # Saddlepoint density, then normalize numerically over [0, inf)
    f = 1 / sym.sqrt(2 * sym.pi * Kpp.subs(
        t, s_hat)) * sym.exp(K.subs(t, s_hat) - s_hat * x)
    fp = sym.Lambdify(x, f.simplify())
    c = integrate.quad(fp, 0, np.inf)[0]
    return lambda x: 1 / c * fp(x)
def diff(expr, xx, alpha):
    """Differentiate expr with respect to xx.

    :arg expr: symengine/symengine Expression to differentiate.
    :arg xx: iterable of coordinates to differentiate with respect to.
    :arg alpha: derivative multiindex, one entry for each entry of xx
        indicating how many derivatives in that direction.
    :returns: New symengine/symengine expression."""
    pairs = list(zip(xx, alpha))
    if isinstance(expr, sympy.Expr):
        # sympy accepts (coordinate, order) pairs directly
        return expr.diff(*pairs)
    # symengine expects each coordinate repeated once per derivative order
    flattened = chain.from_iterable(
        repeat(coord, order) for coord, order in pairs)
    return symengine.diff(expr, *flattened)
def runEstimation(self, covariate_data):
    """Fit the model to covariate_data by maximum-likelihood estimation.

    Builds the symbolic log-likelihood and its gradient, minimizes the
    negative RLL with Nelder-Mead, refines the solution via the gradient,
    then stores the fitted parameters, hazard values, model fitting and
    goodness-of-fit results on the instance.
    """
    # need class of specific model being used, lambda function stored as
    # class variable; for fewer than the maximum number of covariates the
    # remaining covariate slots are implicitly zero (see zero_array notes
    # in earlier revisions).
    optimize_start = time.process_time()  # record optimization time
    initial = self.initialEstimates()
    log.info("Initial estimates: %s", initial)

    # Symbolic log-likelihood f over symbols x, built from the hazard rate
    f, x = self.LLF_sym(self.hazardSymbolic, covariate_data)
    # Gradient of the log-likelihood: one partial derivative per symbol
    bh = np.array(
        [symengine.diff(f, x[i]) for i in range(self.numSymbols)])
    fd = self.convertSym(x, bh, "numpy")

    solution_object = scipy.optimize.minimize(self.RLL_minimize,
                                              x0=initial,
                                              args=(covariate_data, ),
                                              method='Nelder-Mead')
    self.mle_array = self.optimizeSolution(fd, solution_object.x)
    optimize_stop = time.process_time()
    log.info("Optimization time: %s", optimize_stop - optimize_start)
    log.info("Optimized solution: %s", self.mle_array)

    self.modelParameters = self.mle_array[:self.numParameters]
    self.betas = self.mle_array[self.numParameters:]
    # BUGFIX: these previously passed the values as extra positional args
    # without a %s placeholder (log.info("model parameters =", v)), which
    # the logging module cannot format; use lazy %s formatting instead.
    log.info("model parameters = %s", self.modelParameters)
    log.info("betas = %s", self.betas)

    hazard = np.array([
        self.hazardNumerical(i + 1, self.modelParameters)
        for i in range(self.n)
    ])
    self.hazard_array = hazard  # cached so MVF prediction needn't recompute
    self.modelFitting(hazard, self.mle_array, covariate_data)
    self.goodnessOfFit(self.mle_array, covariate_data)
import symengine

# Symbolic Jacobian demo.
# BUGFIX/idiom: renamed the variable tuple from `vars` to `variables` so the
# builtin vars() is no longer shadowed.
variables = symengine.symbols('x y')  # Define x and y variables
f = symengine.sympify(['y*x**2', '5*x + sin(y)'])  # Define function
J = symengine.zeros(len(f), len(variables))  # Initiate Jacobian matrix

# Fill Jacobian matrix with entries J[i, j] = d f_i / d variables_j
for i, fi in enumerate(f):
    for j, s in enumerate(variables):
        J[i, j] = symengine.diff(fi, s)

print(J)
def zweitens(alpha, beta, n1):
    """Return the n1-th derivative with respect to r[0] of
    (1 - r[0])**(alpha + n1) * (1 + r[0])**(beta + n1)."""
    base = r[0]
    expr = ((1 - base)**(alpha + n1)) * ((1 + base)**(beta + n1))
    # Differentiate n1 times with respect to the same variable
    return si.diff(expr, *(n1 * [base]))
def main():
    """Run the staircase-protocol sensitivity analysis: simulate the model,
    plot sensitivities/state occupancies, and examine identifiability via
    the eigenstructure of H = S^T S and pairwise covariance ellipses."""
    # Check input arguments
    parser = get_parser()
    args = parser.parse_args()
    par = Params()

    plot_dir = os.path.join(args.output, "staircase")
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)

    # Choose parameters (make sure conductance is the last parameter)
    para = np.array([
        2.07E-3, 7.17E-2, 3.44E-5, 6.18E-2, 4.18E-1, 2.58E-2, 4.75E-2,
        2.51E-2, 3.33E-2
    ])

    # Compute resting potential for 37 degrees C
    reversal_potential = calculate_reversal_potential(temp=37)
    par.Erev = reversal_potential
    print("reversal potential is {}".format(reversal_potential))

    # Create symbols for symbolic functions
    p, y, v = CreateSymbols(par)
    k = se.symbols('k1, k2, k3, k4')

    # Define system equations and initial conditions
    k1 = p[0] * se.exp(p[1] * v)
    k2 = p[2] * se.exp(-p[3] * v)
    k3 = p[4] * se.exp(p[5] * v)
    k4 = p[6] * se.exp(-p[7] * v)

    # Notation is consistent between the two papers
    A = se.Matrix([[-k1 - k3 - k4, k2 - k4, -k4], [k1, -k2 - k3, k4],
                   [-k1, k3 - k1, -k2 - k4 - k1]])
    B = se.Matrix([k4, 0, k1])

    # Steady-state current at the holding potential
    current_limit = (p[-1] * (par.holding_potential - reversal_potential) *
                     k1 / (k1 + k2) * k4 / (k3 + k4)).subs(
                         v, par.holding_potential)
    print("{} Current limit computed as {}".format(
        __file__, current_limit.subs(p, para).evalf()))

    # Analytic steady-state sensitivities of the current limit
    sens_inf = [
        float(se.diff(current_limit, p[j]).subs(p, para).evalf())
        for j in range(0, par.n_params)
    ]
    print("{} sens_inf calculated as {}".format(__file__, sens_inf))

    # Load the staircase protocol and interpolate it over time (ms)
    protocol = pd.read_csv(
        os.path.join(
            os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
            "protocols", "protocol-staircaseramp.csv"))
    times = 1000 * protocol["time"].values
    voltages = protocol["voltage"].values
    spikes = 1000 * detect_spikes(protocol["time"], protocol["voltage"])
    staircase_protocol = scipy.interpolate.interp1d(times,
                                                    voltages,
                                                    kind="linear")
    # Clamp to the holding potential beyond the end of the protocol
    staircase_protocol_safe = lambda t: staircase_protocol(t) if t < times[
        -1] else par.holding_potential

    funcs = GetSensitivityEquations(par,
                                    p,
                                    y,
                                    v,
                                    A,
                                    B,
                                    para,
                                    times,
                                    voltage=staircase_protocol_safe)
    # BUGFIX: a stray trailing comma previously wrapped the result in a
    # 1-tuple (ret = funcs....(para),) forcing ret[0][0]/ret[0][1] indexing.
    ret = funcs.SimulateForwardModelSensitivities(para)
    current = ret[0]
    S1 = ret[1]

    # Scale sensitivities by the parameter values (semi-relative form)
    S1n = S1 * np.array(para)[None, :]
    sens_inf_N = sens_inf * np.array(para)[None, :]

    param_labels = ['S(p' + str(i + 1) + ',t)' for i in range(par.n_params)]
    [
        plt.plot(funcs.times, sens, label=param_labels[i])
        for i, sens in enumerate(S1n.T)
    ]
    [plt.axhline(s) for s in sens_inf_N[0, :]]
    plt.legend()
    plt.xlabel("time /ms")
    plt.ylabel("dI(t)/dp")
    if args.plot:
        plt.show()
    else:
        plt.savefig(os.path.join(plot_dir, "sensitivities_plot"))

    state_variables = funcs.GetStateVariables(para)
    state_labels = ['C', 'O', 'I', 'IC']
    param_labels = ['S(p' + str(i + 1) + ',t)' for i in range(par.n_params)]

    # Four stacked panels: voltage, current, state occupancy, sensitivities
    fig = plt.figure(figsize=(8, 8), dpi=args.dpi)
    ax1 = fig.add_subplot(411)
    ax1.plot(funcs.times, funcs.GetVoltage())
    ax1.grid(True)
    ax1.set_xticklabels([])
    ax1.set_ylabel('Voltage (mV)')
    [ax1.axvline(spike, color='red') for spike in spikes]
    ax2 = fig.add_subplot(412)
    ax2.plot(funcs.times, funcs.SimulateForwardModel(para))
    ax2.grid(True)
    ax2.set_xticklabels([])
    ax2.set_ylabel('Current (nA)')
    ax3 = fig.add_subplot(413)
    for i in range(par.n_state_vars + 1):
        ax3.plot(funcs.times, state_variables[:, i], label=state_labels[i])
    ax3.legend(ncol=4)
    ax3.grid(True)
    ax3.set_xticklabels([])
    ax3.set_ylabel('State occupancy')
    ax4 = fig.add_subplot(414)
    for i in range(par.n_params):
        ax4.plot(funcs.times, S1n[:, i], label=param_labels[i])
    ax4.legend(ncol=3)
    ax4.grid(True)
    ax4.set_xlabel('Time (ms)')
    ax4.set_ylabel('Sensitivities')
    plt.tight_layout()
    if not args.plot:
        plt.savefig(
            os.path.join(plot_dir,
                         'ForwardModel_SW_{}.png'.format(args.sine_wave)))

    # Only take every 100th point
    # S1n = S1n[0:-1:10]
    H = np.dot(S1n.T, S1n)
    print(H)
    eigvals = np.linalg.eigvals(H)
    # Sigma2 - the observed variance. 1885 is the value taken from a fit
    sigma2 = 1885 / (len(funcs.times) - 1)
    print('Eigenvalues of H:\n{}'.format(eigvals.real))

    # Plot the eigenvalues of H, shows the condition of H
    fig = plt.figure(figsize=(6, 6), dpi=args.dpi)
    ax = fig.add_subplot(111)
    for i in eigvals:
        ax.axhline(y=i, xmin=0.25, xmax=0.75)
    ax.set_yscale('log')
    ax.set_xticks([])
    ax.grid(True)
    if not args.plot:
        plt.savefig(
            os.path.join(plot_dir,
                         'Eigenvalues_SW_{}.png'.format(args.sine_wave)))
    if args.plot:
        plt.show()

    # Pairwise covariance ellipses for each parameter pair
    cov = np.linalg.inv(H * sigma2)
    for j in range(0, par.n_params):
        for i in range(j + 1, par.n_params):
            parameters_to_view = np.array([i, j])
            # sub_sens = S1n[:,[i,j]]
            sub_cov = cov[parameters_to_view[:, None], parameters_to_view]
            # sub_cov = np.linalg.inv(np.dot(sub_sens.T, sub_sens)*sigma2)
            eigen_val, eigen_vec = np.linalg.eigh(sub_cov)
            eigen_val = eigen_val.real
            if eigen_val[0] > 0 and eigen_val[1] > 0:
                print("COV_{},{} : well defined".format(i, j))
                cov_ellipse(sub_cov, q=[0.75, 0.9, 0.99], offset=para[[i, j]])
                plt.ylabel("parameter {}".format(i + 1))
                plt.xlabel("parameter {}".format(j + 1))
                if args.plot:
                    plt.show()
                else:
                    # BUGFIX: previously saved to undefined `output_dir`
                    # (NameError); the plots directory is `plot_dir`.
                    plt.savefig(
                        os.path.join(
                            plot_dir,
                            "covariance_for_parameters_{}_{}".format(
                                j + 1, i + 1)))
                    plt.clf()
            else:
                print("COV_{},{} : negative eigenvalue: {}".format(
                    i, j, eigen_val))
def compute_sensitivity_equations_rhs(self, p, y, v, rhs, para):
    """Build and cache the symbolic RHS, its Jacobian, and the first-order
    parameter-sensitivity equations, then compute steady-state initial
    conditions at the holding potential.

    :arg p: symbols for the model parameters.
    :arg y: symbols for the state variables.
    :arg v: symbol for the membrane voltage.
    :arg rhs: symbolic right-hand-side expressions, one per state variable.
    :arg para: numeric parameter values used to evaluate the steady state.

    Side effects: sets self.func_rhs, self.jfunc_rhs, self.func_S1,
    self.jfunc_S1, self.par.n_state_var_sensitivities, self.rhs0, self.drhs0.
    """
    print('Creating RHS function...')

    # Inputs for RHS ODEs: state variables, then parameters, then voltage
    inputs = [(y[i]) for i in range(self.par.n_state_vars)]
    [inputs.append(p[j]) for j in range(self.par.n_params)]
    inputs.append(v)

    # Create RHS function
    frhs = [rhs[i] for i in range(self.par.n_state_vars)]
    self.func_rhs = se.lambdify(inputs, frhs)

    # Create Jacobian of the RHS function (w.r.t. the state variables)
    jrhs = [se.Matrix(rhs).jacobian(se.Matrix(y))]
    self.jfunc_rhs = se.lambdify(inputs, jrhs)

    print('Creating 1st order sensitivities function...')

    # Create symbols for 1st order sensitivities: dydp[i][j] = dy_i/dp_j
    dydp = [[
        se.symbols('dy%d' % i + 'dp%d' % j)
        for j in range(self.par.n_params)
    ] for i in range(self.par.n_state_vars)]

    # Append 1st order sensitivities to inputs (parameter-major order)
    for i in range(self.par.n_params):
        for j in range(self.par.n_state_vars):
            inputs.append(dydp[j][i])

    # Initialise 1st order sensitivities
    dS = [[0 for j in range(self.par.n_params)]
          for i in range(self.par.n_state_vars)]
    S = [[dydp[i][j] for j in range(self.par.n_params)]
         for i in range(self.par.n_state_vars)]

    # Create 1st order sensitivities function via the forward sensitivity
    # equations: dS[i][j] = d(rhs_i)/dp_j + sum_l d(rhs_i)/dy_l * S[l][j]
    fS1, Ss = [], []
    for i in range(self.par.n_state_vars):
        for j in range(self.par.n_params):
            dS[i][j] = se.diff(rhs[i], p[j])
            for l in range(self.par.n_state_vars):
                dS[i][j] = dS[i][j] + se.diff(rhs[i], y[l]) * S[l][j]

    # Flatten 1st order sensitivities for function (parameter-major)
    [[fS1.append(dS[i][j]) for i in range(self.par.n_state_vars)]
     for j in range(self.par.n_params)]
    [[Ss.append(S[i][j]) for i in range(self.par.n_state_vars)]
     for j in range(self.par.n_params)]

    self.func_S1 = se.lambdify(inputs, fS1)

    # Define number of 1st order sensitivities
    self.par.n_state_var_sensitivities = self.par.n_params * self.par.n_state_vars

    # Append 1st order sensitivities to initial conditions
    # NOTE(review): dydps appears unused below — presumably superseded by
    # the analytic S1_inf computation; confirm before removing.
    dydps = np.zeros((self.par.n_state_var_sensitivities))

    # Concatenate RHS and 1st order sensitivities
    Ss = np.concatenate((list(y), Ss))
    fS1 = np.concatenate((frhs, fS1))

    # Create Jacobian of the 1st order sensitivities function
    jS1 = [se.Matrix(fS1).jacobian(se.Matrix(Ss))]
    self.jfunc_S1 = se.lambdify(inputs, jS1)

    print('Getting ' + str(self.par.holding_potential) +
          ' mV steady state initial conditions...')

    # Set the initial conditions of the model and the initial sensitivities
    # by finding the steady state of the model
    # RHS
    # Can be found analytically: solve A*y + B = 0 at the holding potential
    rhs_inf = (-(self.A.inv()) * self.B).subs(v, self.par.holding_potential)
    self.rhs0 = [float(expr.evalf()) for expr in rhs_inf.subs(p, para)]

    # Steady state can be found analytically (sensitivity steady states)
    S1_inf = [float(se.diff(rhs_inf[i], p[j]).subs(p, para).evalf())
              for j in range(0, self.par.n_params) \
              for i in range(0, self.par.n_state_vars)]
    self.drhs0 = np.concatenate((self.rhs0, S1_inf))

    print('Done')
""" Tools used across parameter selection modules """ from typing import List, Dict import itertools import numpy as np import symengine from symengine import Symbol from pycalphad import variables as v from espei.utils import build_sitefractions from espei.parameter_selection.redlich_kister import calc_interaction_product feature_transforms = {"CPM_FORM": lambda GM: -v.T*symengine.diff(GM, v.T, 2), "CPM_MIX": lambda GM: -v.T*symengine.diff(GM, v.T, 2), "CPM": lambda GM: -v.T*symengine.diff(GM, v.T, 2), "SM_FORM": lambda GM: -symengine.diff(GM, v.T), "SM_MIX": lambda GM: -symengine.diff(GM, v.T), "SM": lambda GM: -symengine.diff(GM, v.T), "HM_FORM": lambda GM: GM - v.T*symengine.diff(GM, v.T), "HM_MIX": lambda GM: GM - v.T*symengine.diff(GM, v.T), "HM": lambda GM: GM - v.T*symengine.diff(GM, v.T)} def shift_reference_state(desired_data, feature_transform, fixed_model, mole_atoms_per_mole_formula_unit): """ Shift _MIX or _FORM data to a common reference state in per mole-atom units. Parameters ---------- desired_data : List[Dict[str, Any]]
def compute_sensitivity_equations_rhs(self, p, y, v, rhs, para): print('Creating RHS function...') # Inputs for RHS ODEs inputs = [(y[i]) for i in range(self.par.n_state_vars)] [inputs.append(p[j]) for j in range(self.par.n_params)] inputs.append(v) # Create RHS function frhs = [rhs[i] for i in range(self.par.n_state_vars)] self.func_rhs = se.lambdify(inputs, frhs) # Create Jacobian of the RHS function jrhs = [se.Matrix(rhs).jacobian(se.Matrix(y))] self.jfunc_rhs = se.lambdify(inputs, jrhs) print('Creating 1st order sensitivities function...') # Create symbols for 1st order sensitivities dydp = [[ se.symbols('dy%d' % i + 'dp%d' % j) for j in range(self.par.n_params) ] for i in range(self.par.n_state_vars)] # Append 1st order sensitivities to inputs for i in range(self.par.n_params): for j in range(self.par.n_state_vars): inputs.append(dydp[j][i]) # Initialise 1st order sensitivities dS = [[0 for j in range(self.par.n_params)] for i in range(self.par.n_state_vars)] S = [[dydp[i][j] for j in range(self.par.n_params)] for i in range(self.par.n_state_vars)] # Create 1st order sensitivities function fS1, Ss = [], [] for i in range(self.par.n_state_vars): for j in range(self.par.n_params): dS[i][j] = se.diff(rhs[i], p[j]) for l in range(self.par.n_state_vars): dS[i][j] = dS[i][j] + se.diff(rhs[i], y[l]) * S[l][j] # Flatten 1st order sensitivities for function [[fS1.append(dS[i][j]) for i in range(self.par.n_state_vars)] for j in range(self.par.n_params)] [[Ss.append(S[i][j]) for i in range(self.par.n_state_vars)] for j in range(self.par.n_params)] self.auxillary_expression = p[self.par.GKr_index] * y[ self.par.open_state] * (v - self.par.Erev) # dI/do self.dIdo = se.diff(self.auxillary_expression, y[self.par.open_state]) self.func_S1 = se.lambdify(inputs, fS1) # Define number of 1st order sensitivities self.par.n_state_var_sensitivities = self.par.n_params * self.par.n_state_vars # Append 1st order sensitivities to initial conditions dydps = 
np.zeros((self.par.n_state_var_sensitivities)) # Concatenate RHS and 1st order sensitivities Ss = np.concatenate((list(y), Ss)) fS1 = np.concatenate((frhs, fS1)) # Create Jacobian of the 1st order sensitivities function jS1 = [se.Matrix(fS1).jacobian(se.Matrix(Ss))] self.jfunc_S1 = se.lambdify(inputs, jS1) print('Getting {}mV steady state initial conditions...'.format( self.par.holding_potential)) # Set the initial conditions of the model and the initial sensitivities # by finding the steady state of the model # RHS # Can be found analytically rhs_inf = (-(self.A.inv()) * self.B).subs(v, self.par.holding_potential) rhs_inf_eval = np.array([float(row) for row in rhs_inf.subs(p, para)]) current_inf_expr = self.auxillary_expression.subs(y, rhs_inf_eval) current_inf = float( current_inf_expr.subs(v, self.par.holding_potential).subs( p, para).evalf()) # The limit of the current when voltage is held at the holding potential print("Current limit computed as {}".format(current_inf)) self.rhs0 = rhs_inf_eval # Find sensitivity steady states S1_inf = np.array([ float(se.diff(rhs_inf[i], p[j]).subs(p, para).evalf()) for j in range(0, self.par.n_params) for i in range(0, self.par.n_state_vars) ]) self.drhs0 = np.concatenate((self.rhs0, S1_inf)) index_sensitivities = self.par.n_state_vars + self.par.open_state + self.par.n_state_vars * np.array( range(self.par.n_params)) sens_inf = self.drhs0[index_sensitivities] * ( self.par.holding_potential - self.par.Erev) * para[-1] sens_inf[-1] += (self.par.holding_potential - self.par.Erev) * rhs_inf_eval[self.par.open_state] print("sens_inf calculated as {}".format(sens_inf)) print('Done')