def speed_up(function, parameters):
    str_params = [str(x) for x in parameters]
    if len(parameters) == 0:
        constant_result = np.array(function).astype(float).reshape(function.shape)

        def f(**kwargs):
            return constant_result
    elif BACKEND is None:
        # @profile
        def f(**kwargs):
            filtered_kwargs = {str(k): kwargs[k] for k in str_params}
            return np.array(function.subs(filtered_kwargs).tolist(),
                            dtype=float).reshape(function.shape)
    else:
        if BACKEND == 'numpy' or BACKEND == 'lambda':
            fast_f = lambdify(list(parameters), function, backend='lambda')
        elif BACKEND == 'cython' or BACKEND == 'llvm':
            fast_f = lambdify(list(parameters), function, backend='llvm', real=True)

        # @profile
        def f(**kwargs):
            filtered_args = [kwargs[k] for k in str_params]
            return np.nan_to_num(
                np.asarray(fast_f(*filtered_args)).reshape(function.shape))
    return f
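# Minimal usage sketch for speed_up (not part of the original module). It
# assumes `np` is numpy, that the module-level BACKEND flag exists as in the
# snippet above, and that `function` is a symbolic matrix with .subs, .tolist
# and .shape (a sympy Matrix is used here); the symbols x, y and the matrix M
# are hypothetical examples.
import numpy as np
import sympy

BACKEND = None  # assumed global; 'lambda' or 'llvm' would use lambdify instead

x, y = sympy.symbols('x y')
M = sympy.Matrix([[x * y], [x + y]])
f = speed_up(M, [x, y])
print(f(x=1.5, y=2.0))  # numeric array of shape (2, 1): [[3.0], [3.5]]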
def _build_constraint_functions(variables, constraints, include_hess=False,
                                parameters=None, cse=True):
    if parameters is None:
        parameters = []
    else:
        parameters = [wrap_symbol_symengine(p) for p in parameters]
    variables = tuple(variables)
    wrt = variables
    parameters = tuple(parameters)
    constraint_func, jacobian_func, hessian_func = None, None, None
    inp = sympify(variables + parameters)
    graph = sympify(constraints)
    constraint_func = lambdify(inp, [graph], backend='llvm', cse=cse)
    grad_graphs = list(list(c.diff(w) for w in wrt) for c in graph)
    jacobian_func = lambdify(inp, grad_graphs, backend='llvm', cse=cse)
    if include_hess:
        hess_graphs = list(list(list(g.diff(w) for w in wrt) for g in c)
                           for c in grad_graphs)
        hessian_func = lambdify(inp, hess_graphs, backend='llvm', cse=cse)
    return ConstraintFunctions(cons_func=constraint_func, cons_jac=jacobian_func,
                               cons_hess=hessian_func)
def build_constraint_functions(variables, constraints, parameters=None,
                               func_options=None, jac_options=None,
                               hess_options=None):
    """Build callable functions for the constraints, constraint Jacobian, and constraint Hessian.

    Parameters
    ----------
    variables : List[sympy.core.symbol.Symbol]
        Free variables in the sympy_graph. By convention these are usually all
        instances of StateVariables.
    constraints : List[sympy.core.expr.Expr]
        List of SymPy expressions to compile.
    parameters : Optional[List[sympy.core.symbol.Symbol]]
        Free variables in the sympy_graph. These are typically external
        parameters that are controlled by the user.
    func_options : Optional[Dict[str, str]]
        Options to pass to ``lambdify`` when compiling the function.
    jac_options : Optional[Dict[str, str]]
        Options to pass to ``lambdify`` when compiling the Jacobian.
    hess_options : Optional[Dict[str, str]]
        Options to pass to ``lambdify`` when compiling the Hessian.

    Returns
    -------
    ConstraintFunctions

    Notes
    -----
    Default options for compiling the function, gradient and Hessian are
    defined by ``_get_lambdify_options``.
    """
    if parameters is None:
        parameters = []
    else:
        parameters = [wrap_symbol_symengine(p) for p in parameters]
    variables = tuple(variables)
    wrt = variables
    parameters = tuple(parameters)
    constraint_func, jacobian_func, hessian_func = None, None, None
    inp = sympify(variables + parameters)
    graph = sympify(constraints)
    constraint_func = lambdify(inp, [graph], **_get_lambidfy_options(func_options))
    grad_graphs = list(list(c.diff(w) for w in wrt) for c in graph)
    jacobian_func = lambdify(inp, grad_graphs, **_get_lambidfy_options(jac_options))
    hess_graphs = list(list(list(g.diff(w) for w in wrt) for g in c)
                       for c in grad_graphs)
    hessian_func = lambdify(inp, hess_graphs, **_get_lambidfy_options(hess_options))
    return ConstraintFunctions(cons_func=constraint_func, cons_jac=jacobian_func,
                               cons_hess=hessian_func)
def build_constraint_functions(variables, constraints, parameters=None,
                               func_options=None, jac_options=None,
                               hess_options=None):
    """Build callable functions for the constraints, constraint Jacobian, and constraint Hessian.

    Parameters
    ----------
    variables : List[symengine.Symbol]
        Free variables in the symengine_graph. By convention these are usually
        all instances of StateVariables.
    constraints : List[symengine.Basic]
        List of SymEngine expressions to compile.
    parameters : Optional[List[symengine.Symbol]]
        Free variables in the symengine_graph. These are typically external
        parameters that are controlled by the user.
    func_options : Optional[Dict[str, str]]
        Options to pass to ``lambdify`` when compiling the function.
    jac_options : Optional[Dict[str, str]]
        Options to pass to ``lambdify`` when compiling the Jacobian.
    hess_options : Optional[Dict[str, str]]
        Options to pass to ``lambdify`` when compiling the Hessian.

    Returns
    -------
    ConstraintFunctions

    Notes
    -----
    Default options for compiling the function, gradient and Hessian are
    defined by ``_get_lambdify_options``.
    """
    if parameters is None:
        parameters = []
    else:
        parameters = [wrap_symbol(p) for p in parameters]
    variables = tuple(variables)
    wrt = variables
    parameters = tuple(parameters)
    constraint_func, jacobian_func, hessian_func = None, None, None
    # Replace complex infinity (zoo) with real infinity because SymEngine
    # cannot lambdify complex infinity. We also replace in the derivatives in
    # case some differentiation would produce a complex infinity. The
    # replacement is assumed to be cheap enough that it's safer to replace the
    # complex values and pay the minor time penalty.
    inp = sympify(variables + parameters)
    graph = sympify([f.xreplace({zoo: oo}) for f in constraints])
    constraint_func = lambdify(inp, [graph], **_get_lambidfy_options(func_options))
    grad_graphs = list(list(c.diff(w).xreplace({zoo: oo}) for w in wrt) for c in graph)
    jacobian_func = lambdify(inp, grad_graphs, **_get_lambidfy_options(jac_options))
    hess_graphs = list(list(list(g.diff(w).xreplace({zoo: oo}) for w in wrt) for g in c)
                       for c in grad_graphs)
    hessian_func = lambdify(inp, hess_graphs, **_get_lambidfy_options(hess_options))
    return ConstraintFunctions(cons_func=constraint_func, cons_jac=jacobian_func,
                               cons_hess=hessian_func)
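# Standalone sketch of the compilation pattern used above, written directly
# against symengine because the module helpers (wrap_symbol, sympify,
# _get_lambidfy_options, ConstraintFunctions) are not reproduced here. The
# constraint g(x, y) = x**2 + y**2 - 1 is a made-up example.
import numpy as np
import symengine as se

x, y = se.symbols('x y')
wrt = (x, y)
constraints = [x**2 + y**2 - 1]

grads = [[c.diff(w) for w in wrt] for c in constraints]
hesses = [[[g.diff(w) for w in wrt] for g in row] for row in grads]

cons_func = se.lambdify(wrt, [constraints], backend='lambda')
cons_jac = se.lambdify(wrt, grads, backend='lambda')
cons_hess = se.lambdify(wrt, hesses, backend='lambda')

pt = np.array([0.6, 0.8])
print(cons_func(pt))  # constraint residual(s)
print(cons_jac(pt))   # Jacobian entries, here [2*x, 2*y]
print(cons_hess(pt))  # Hessian entries, here [[2, 0], [0, 2]]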
def test_integralKernelMinus(self):
    n = 10
    m = 10
    listn = np.zeros(n, dtype=object)
    vals = np.zeros((n, m), dtype=object)
    for i in range(n):
        Plus0 = np.array(CFS_Action.integralKernelMinus(i + 1)).reshape(2, 2)
        print(Plus0)
        #Plus1 = np.array(CFS_Action.integralKernelMinus(i + 1), dtype=object).reshape(2, 2)
        listn[i] = si.lambdify(r[0], [
            sy.simplify(-2 * si.cos(r[0] / 2) * CFS_Action.prefactor(i) *
                        JacPol(si.Rational(1, 2), si.Rational(3, 2), i) +
                        Plus0[0, 0] + Plus0[1, 1])
        ], real=False)
        #print(sy.simplify(JacPol(1/2, 3/2, i)), '=?=', sy.jacobi(i, 1/2, 3/2, r[0]))
        #listn[i] = si.lambdify(r[0], [sy.simplify(JacPol(1/2, 3/2, i)) - sy.jacobi(i, 1/2, 3/2, r[0])])
    for i in range(n):
        for j in range(m):
            vals[i, j] = listn[i](j)
    print(vals)
    np.testing.assert_almost_equal(vals, np.zeros((n, m)), 10)
def get_neg_likelihood_of_iso_freq(self, within_isomer_ids=None, scipy_style=True):
    # log_like_formula = self.traversome.get_likelihood_binomial_formula(
    #     self.isomer_percents,
    #     log_func=sympy.log,
    #     within_isomer_ids=within_isomer_ids)
    log_like_formula = self.traversome.get_multinomial_like_formula(
        self.isomer_percents,
        # log_func=sympy.log,
        log_func=symengine.log,
        within_isomer_ids=within_isomer_ids)
    if within_isomer_ids is None:
        within_isomer_ids = set(range(self.num_of_isomers))
    # neg_likelihood_of_iso_freq = sympy.lambdify(
    neg_likelihood_of_iso_freq = symengine.lambdify(
        args=[self.isomer_percents[isomer_id]
              for isomer_id in range(self.num_of_isomers)
              if isomer_id in within_isomer_ids],
        # expr=-log_like_formula.loglike_expression)
        exprs=[-log_like_formula.loglike_expression])
    logger.trace("Formula: {}".format(-log_like_formula.loglike_expression))
    if scipy_style:
        # for compatibility between scipy and sympy:
        # positional arguments -> single tuple argument
        def neg_likelihood_of_iso_freq_single_arg(x):
            return neg_likelihood_of_iso_freq(*tuple(x))

        return LogLikeFuncInfo(
            loglike_func=neg_likelihood_of_iso_freq_single_arg,
            variable_size=log_like_formula.variable_size,
            sample_size=log_like_formula.sample_size)
    else:
        return LogLikeFuncInfo(
            loglike_func=neg_likelihood_of_iso_freq,
            variable_size=log_like_formula.variable_size,
            sample_size=log_like_formula.sample_size)
def Action_For_Test():
    K = si.symbols('K')
    term1 = 1 / (6 * si.pi**8)
    term2 = (2 * K * (15 + 31 * K**2 + 9 * K**4 + 9 * K**6) +
             3 * (1 + K**2)**3 * (5 - 3 * K**2) * sy.acos(1 - 2 / (1 + K**2))) / ((1 + K**2)**2)
    funci = si.lambdify(K, [term1 * term2])
    return funci
def convertSym(self, x, bh, target):
    """Convert a symbolic function to a lambda function.

    Args:
        x: symbols that become the arguments of the compiled callable.
        bh: symbolic expression(s) to compile.
        target: unused here.

    Returns:
        Callable produced by ``symengine.lambdify``.
    """
    return symengine.lambdify(x, bh, backend='lambda')
def diff_funcoes_forma(self):
    """Return a matrix with the derivatives of the shape functions with
    respect to x (row 0) and y (row 1). The returned functions are
    `lambda` functions."""
    def diff_func():
        func_forma = self.funcoes_forma()
        return symengine.Matrix([[f.diff(v) for f in func_forma]
                                 for v in [self._x, self._y]])

    if (nlados := self.num_nos) not in self.BANCO_DIFF_FUNCOES_FORMA:
        func = diff_func()
        self.BANCO_DIFF_FUNCOES_FORMA[nlados] = symengine.lambdify(list(func.free_symbols), func)
    # Return the cached compiled derivatives, as described in the docstring.
    return self.BANCO_DIFF_FUNCOES_FORMA[nlados]
def __set__(self, instance, value):
    key = hash(value)
    if key not in self.lambda_funcs:
        params = []
        for p in sorted(value.free_symbols, key=lambda s: s.name):
            if p.name == "t":
                # Argument "t" must be placed first. This is a vector.
                params.insert(0, p)
                continue
            params.append(p)
        if _optional.HAS_SYMENGINE:
            try:
                lamb = sym.lambdify(params, [value], real=False)

                def _wrapped_lamb(*args):
                    if isinstance(args[0], np.ndarray):
                        # When args[0] is a vector ("t"), tile the other arguments args[1:]
                        # to prevent evaluation from looping over each element in t.
                        t = args[0]
                        args = np.hstack(
                            (
                                t.reshape(t.size, 1),
                                np.tile(args[1:], t.size).reshape(t.size, len(args) - 1),
                            )
                        )
                    return lamb(args)

                func = _wrapped_lamb
            except RuntimeError:
                # Currently symengine doesn't support the complex_double version of
                # several functions, such as the comparison operators and piecewise.
                # If the expression contains these functions, fall back to sympy's lambdify.
                # See https://github.com/symengine/symengine.py/issues/406 for details.
                import sympy

                func = sympy.lambdify(params, value)
        else:
            func = sym.lambdify(params, value)
        self.lambda_funcs[key] = func
def build_functions(sympy_graph, variables, parameters=None, wrt=None,
                    include_obj=True, include_grad=False, include_hess=False,
                    cse=True):
    if wrt is None:
        wrt = sympify(tuple(variables))
    if parameters is None:
        parameters = []
    else:
        parameters = [wrap_symbol_symengine(p) for p in parameters]
    variables = tuple(variables)
    parameters = tuple(parameters)
    func, grad, hess = None, None, None
    inp = sympify(variables + parameters)
    graph = sympify(sympy_graph)
    if count_ops(graph) > BACKEND_OPS_THRESHOLD:
        backend = 'lambda'
    else:
        backend = 'llvm'
    # TODO: did not replace zoo with oo
    if include_obj:
        func = lambdify(inp, [graph], backend=backend, cse=cse)
    if include_grad or include_hess:
        grad_graphs = list(graph.diff(w) for w in wrt)
        grad_ops = sum(count_ops(x) for x in grad_graphs)
        if grad_ops > BACKEND_OPS_THRESHOLD:
            grad_backend = 'lambda'
        else:
            grad_backend = 'llvm'
        if include_grad:
            grad = lambdify(inp, grad_graphs, backend=grad_backend, cse=cse)
        if include_hess:
            hess_graphs = list(list(g.diff(w) for w in wrt) for g in grad_graphs)
            # Hessians are hard-coded to always use the lambda backend, for performance
            hess = lambdify(inp, hess_graphs, backend='lambda', cse=cse)
    return BuildFunctionsResult(func=func, grad=grad, hess=hess)
def build_functions(sympy_graph, variables, parameters=None, wrt=None,
                    include_obj=True, include_grad=False, include_hess=False,
                    cse=True):
    if wrt is None:
        wrt = sympify(tuple(variables))
    if parameters is None:
        parameters = []
    else:
        parameters = [wrap_symbol_symengine(p) for p in parameters]
    variables = tuple(variables)
    parameters = tuple(parameters)
    func, grad, hess = None, None, None
    inp = sympify(variables + parameters)
    graph = sympify(sympy_graph)
    # TODO: did not replace zoo with oo
    if include_obj:
        func = lambdify(inp, [graph], backend='llvm', cse=cse)
    if include_grad or include_hess:
        grad_graphs = list(graph.diff(w) for w in wrt)
        if include_grad:
            grad = lambdify(inp, grad_graphs, backend='llvm', cse=cse)
        if include_hess:
            hess_graphs = list(list(g.diff(w) for w in wrt) for g in grad_graphs)
            hess = lambdify(inp, hess_graphs, backend='llvm', cse=cse)
    return BuildFunctionsResult(func=func, grad=grad, hess=hess)
def dict_to_func(dictionary):
    expr = 0
    for key in dictionary:
        term = dictionary[key]
        for var in key:
            term *= se.Symbol(var)
        expr += term
    if type(expr) == float:
        f = expr
    else:
        var_list = list(expr.free_symbols)
        var_list.sort(key=sort_disc_func)
        f = se.lambdify(var_list, (expr, ))
    return f
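# Usage sketch for dict_to_func with a hypothetical input: keys are tuples of
# variable names, values are coefficients, so the dict below encodes
# 2.0*s0*s1 - 1.0*s0 + 0.5. Note that dict_to_func itself also relies on
# sort_disc_func from the surrounding module to order the lambdify arguments;
# the loop here only rebuilds the expression the same way to show the encoding.
import symengine as se

terms = {('s0', 's1'): 2.0, ('s0',): -1.0, (): 0.5}

expr = 0
for key, coeff in terms.items():
    term = coeff
    for var in key:
        term *= se.Symbol(var)
    expr += term
print(expr)  # 0.5 - 1.0*s0 + 2.0*s0*s1 (term ordering may differ)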
def test_JacobiPolys(self):
    n = 10
    m = 10
    listn = np.zeros(n, dtype=object)
    vals = np.zeros((n, m), dtype=object)
    for i in range(n):
        #print(sy.simplify(JacPol(sy.Rational(1, 2), sy.Rational(3, 2), i)), '=?=', sy.jacobi(i, 1/2, 3/2, r[0]))
        listn[i] = si.lambdify(r[0], [
            sy.simplify(JacPol(sy.Rational(1, 2), sy.Rational(3, 2), i)) -
            sy.jacobi(i, sy.Rational(1, 2), sy.Rational(3, 2), r[0])
        ])
    for i in range(n):
        for j in range(m):
            vals[i, j] = listn[i](j)[0]  # , maxn = 10)
    print(vals)
    np.testing.assert_almost_equal(vals, np.zeros((n, m)), 10)
def test_integralKernelMinus2(self):
    n = 10
    m = 10
    listn = np.zeros(n, dtype=object)
    vals = np.zeros((n, m), dtype=object)
    for i in range(n):
        Plus0 = np.array(CFS_Action.integralKernelMinus(i + 1))  # , dtype=object).reshape(2, 2)
        listn[i] = si.lambdify(r[0], [sy.simplify(Plus0[0, 1] + Plus0[1, 0])], real=False)
    for i in range(n):
        for j in range(m):
            vals[i, j] = listn[i](j)
    print(vals)
    np.testing.assert_almost_equal(vals, np.zeros((n, m)), 10)
def SS(A, num_iterates=10000, interval_length=0.01):
    """
    Computes equation 3.42: the fraction of sampled perturbations that cause a sign switch
    in some part of the inverse matrix.
    :param A: input matrix, numpy array
    :param num_iterates: number of random perturbations to sample
    :param interval_length: length parameter passed to intervals() to set the perturbation range
    :return: Scalar (fraction of perturbations that caused some sign switch)
    """
    if not is_stable(A):
        raise Exception(
            "The input matrix is not stable itself (one or more eigenvalues have non-negative real part). Cannot continue analysis.")
    t0 = timeit.default_timer()
    entries_to_perturb = get_entries_to_perturb(A)
    Ainv = sp.Matrix(np.linalg.inv(A).tolist())
    (m, n) = A.shape
    # get the variables we are going to perturb
    pert_locations_i, pert_locations_j = np.where(entries_to_perturb)
    symbol_string = ""
    for i, j in zip(pert_locations_i, pert_locations_j):
        symbol_string += "eps_%d_%d " % (i, j)
    if len(pert_locations_i) == 1:
        symbol_tup = [sp.symbols(symbol_string)]
    else:
        symbol_tup = list(sp.symbols(symbol_string))
    # create the matrix with the symbolic perturbation values
    B_temp = np.zeros(A.shape).tolist()  # sp.Matrix(np.zeros(A.shape).tolist())
    iter = 0
    for i, j in zip(pert_locations_i, pert_locations_j):
        B_temp[i][j] = symbol_tup[iter]
        iter += 1
    # B_temp = sp.Matrix(B_temp)
    t1 = timeit.default_timer()
    print("preprocess time: %f" % (t1 - t0))
    t0 = timeit.default_timer()
    # form the symbolic matrix (A+B)^(-1)./A^(-1)
    print(B_temp)
    B = sp.Matrix(B_temp)
    print(B)
    AplusB = sp.Matrix(A.tolist()) + B
    AplusBinv = AplusB.inv()
    # AplusBinv = AplusB.inv(method='LU', try_block_diag=True)
    # AplusBinv = AplusB.inv(method='ADJ', try_block_diag=True)
    t1 = timeit.default_timer()
    print("AplusBinv time: %f" % (t1 - t0))
    t0 = timeit.default_timer()
    # component-wise division
    AplusBinvDivAinv = sp.Matrix(np.zeros(A.shape).tolist())
    for i in range(A.shape[0]):
        for j in range(A.shape[1]):
            AplusBinvDivAinv[i, j] = AplusBinv[i, j] / float(Ainv[i, j])
    t1 = timeit.default_timer()
    print("AplusBinvDivAinv time: %f" % (t1 - t0))
    t0 = timeit.default_timer()
    # lambdify the symbolic quantity
    AplusBinvDivAinvEval = sp.lambdify(symbol_tup, AplusBinvDivAinv, "numpy")
    AplusBEval = sp.lambdify(symbol_tup, AplusB, "numpy")
    t1 = timeit.default_timer()
    print("lambdify time: %f" % (t1 - t0))
    # num_iterates = 10000
    # interval_length = 0.01
    switch_count = 0
    is_stable_count = 0
    # initialize the dictionaries of values to pass
    eps_dicts = []
    for iterate in range(num_iterates):
        eps_dicts.append(dict())
    t0 = timeit.default_timer()
    # for each one of the symbols, sample from the appropriate distribution
    for symbol in symbol_tup:
        symbol_name = symbol.name
        i = eval(symbol_name.split('_')[1])
        j = eval(symbol_name.split('_')[2])
        interval = intervals(A[i, j], interval_length)
        dist = st.uniform(interval[0], interval[1])
        vals = dist.rvs(num_iterates)
        iter = 0
        for eps_dict in eps_dicts:
            eps_dict[symbol_name] = vals[iter]
            iter += 1
    t1 = timeit.default_timer()
    print("Sample time: %f" % (t1 - t0))
    # check for sign switches and stability
    t0 = timeit.default_timer()
    for eps_dict in eps_dicts:
        val_list = []
        for var in symbol_tup:
            val_list.append(eps_dict[var.name])
        stab_indicator = is_stable(AplusBEval(*val_list)[0].reshape(A.shape))
        if stab_indicator:
            is_stable_count += 1
        if exists_switch(eps_dict, AplusBinvDivAinvEval, symbol_tup, n) and stab_indicator:
            switch_count += 1
    t1 = timeit.default_timer()
    print("Actual eval time: %f" % (t1 - t0))
    # print(switch_count)
    # print(num_iterates)
    # print(is_stable_count)
    return switch_count / float(num_iterates)  # this is how it was done in the paper/mathematica
def minimize_expr(expr, angle_folds, amplitude_folds, sampler, max_cycles=5,
                  num_samples=1000, strength=1e3, verbose=False):
    # get lists of continuous and discrete variables
    try:
        all_vars = list(expr.free_symbols)
    except AttributeError:
        return expr
    disc_vars, cont_vars = sort_mixed_vars(all_vars)
    cont_bounds = get_bounds(cont_vars, angle_folds=angle_folds,
                             amplitude_folds=amplitude_folds)
    disc_vals = np.random.choice([-1, 1], size=len(disc_vars))
    cont_vals = np.random.uniform(low=cont_bounds.transpose()[0],
                                  high=cont_bounds.transpose()[1],
                                  size=len(cont_vars))
    all_disc_vals, all_cont_vals = [], []
    # minimize expression
    min_energies, cycle = [], 0
    for cycle in range(max_cycles):
        # minimize continuous variables for fixed discrete variables
        cont_expr = expr
        for i in range(len(disc_vars)):
            cont_expr = cont_expr.subs(disc_vars[i], disc_vals[i])
        f = se.lambdify(cont_vars, (cont_expr, ))

        def g(x):
            return f(*x)

        results = sci.optimize.minimize(g, cont_vals, method='L-BFGS-B',
                                        bounds=cont_bounds)
        cont_vals = results.x
        # minimize discrete variables for fixed continuous variables
        disc_expr = expr
        for i in range(len(cont_vars)):
            disc_expr = disc_expr.subs(cont_vars[i], cont_vals[i])
        disc_expr = se.expand(disc_expr)
        bqm = dimod.higherorder.utils.make_quadratic(expr_to_dict(disc_expr),
                                                     strength, dimod.SPIN)
        qubo, constant = bqm.to_qubo()
        # run sampler
        response = sampler.sample_qubo(qubo, num_reads=num_samples)
        solutions = pd.DataFrame(response.data())
        minIndex = int(solutions[['energy']].idxmin())
        minEnergy = round(solutions['energy'][minIndex], 12) + constant
        unredSolution = solutions['sample'][minIndex]
        for key in unredSolution:
            try:
                index = disc_vars.index(key)
            except ValueError:
                continue
            disc_vals[index] = 2 * unredSolution[key] - 1
        min_energies += [minEnergy]
        all_disc_vals += [disc_vals]
        all_cont_vals += [cont_vals]
        if verbose:
            print('Cycle:', cycle + 1, 'Energy:', minEnergy)
    min_energy = min(min_energies)
    index = min_energies.index(min_energy)
    cont_dict = dict(zip(cont_vars, all_cont_vals[index]))
    disc_dict = dict(zip(disc_vars, all_disc_vals[index]))
    return min_energy, cont_dict, disc_dict
def compute_sensitivity_equations_rhs(self, p, y, v, rhs, para):
    print('Creating RHS function...')

    # Inputs for RHS ODEs
    inputs = [(y[i]) for i in range(self.par.n_state_vars)]
    [inputs.append(p[j]) for j in range(self.par.n_params)]
    inputs.append(v)

    # Create RHS function
    frhs = [rhs[i] for i in range(self.par.n_state_vars)]
    self.func_rhs = se.lambdify(inputs, frhs)

    # Create Jacobian of the RHS function
    jrhs = [se.Matrix(rhs).jacobian(se.Matrix(y))]
    self.jfunc_rhs = se.lambdify(inputs, jrhs)

    print('Creating 1st order sensitivities function...')

    # Create symbols for 1st order sensitivities
    dydp = [[se.symbols('dy%d' % i + 'dp%d' % j) for j in range(self.par.n_params)]
            for i in range(self.par.n_state_vars)]

    # Append 1st order sensitivities to inputs
    for i in range(self.par.n_params):
        for j in range(self.par.n_state_vars):
            inputs.append(dydp[j][i])

    # Initialise 1st order sensitivities
    dS = [[0 for j in range(self.par.n_params)] for i in range(self.par.n_state_vars)]
    S = [[dydp[i][j] for j in range(self.par.n_params)] for i in range(self.par.n_state_vars)]

    # Create 1st order sensitivities function
    fS1, Ss = [], []
    for i in range(self.par.n_state_vars):
        for j in range(self.par.n_params):
            dS[i][j] = se.diff(rhs[i], p[j])
            for l in range(self.par.n_state_vars):
                dS[i][j] = dS[i][j] + se.diff(rhs[i], y[l]) * S[l][j]

    # Flatten 1st order sensitivities for function
    [[fS1.append(dS[i][j]) for i in range(self.par.n_state_vars)]
     for j in range(self.par.n_params)]
    [[Ss.append(S[i][j]) for i in range(self.par.n_state_vars)]
     for j in range(self.par.n_params)]

    self.auxillary_expression = p[self.par.GKr_index] * y[self.par.open_state] * (v - self.par.Erev)

    # dI/do
    self.dIdo = se.diff(self.auxillary_expression, y[self.par.open_state])

    self.func_S1 = se.lambdify(inputs, fS1)

    # Define number of 1st order sensitivities
    self.par.n_state_var_sensitivities = self.par.n_params * self.par.n_state_vars

    # Append 1st order sensitivities to initial conditions
    dydps = np.zeros((self.par.n_state_var_sensitivities))

    # Concatenate RHS and 1st order sensitivities
    Ss = np.concatenate((list(y), Ss))
    fS1 = np.concatenate((frhs, fS1))

    # Create Jacobian of the 1st order sensitivities function
    jS1 = [se.Matrix(fS1).jacobian(se.Matrix(Ss))]
    self.jfunc_S1 = se.lambdify(inputs, jS1)

    print('Getting {}mV steady state initial conditions...'.format(
        self.par.holding_potential))

    # Set the initial conditions of the model and the initial sensitivities
    # by finding the steady state of the model

    # RHS
    # Can be found analytically
    rhs_inf = (-(self.A.inv()) * self.B).subs(v, self.par.holding_potential)
    rhs_inf_eval = np.array([float(row) for row in rhs_inf.subs(p, para)])

    current_inf_expr = self.auxillary_expression.subs(y, rhs_inf_eval)
    # The limit of the current when voltage is held at the holding potential
    current_inf = float(
        current_inf_expr.subs(v, self.par.holding_potential).subs(p, para).evalf())
    print("Current limit computed as {}".format(current_inf))

    self.rhs0 = rhs_inf_eval

    # Find sensitivity steady states
    S1_inf = np.array([
        float(se.diff(rhs_inf[i], p[j]).subs(p, para).evalf())
        for j in range(0, self.par.n_params)
        for i in range(0, self.par.n_state_vars)
    ])
    self.drhs0 = np.concatenate((self.rhs0, S1_inf))

    index_sensitivities = self.par.n_state_vars + self.par.open_state + \
        self.par.n_state_vars * np.array(range(self.par.n_params))
    sens_inf = self.drhs0[index_sensitivities] * \
        (self.par.holding_potential - self.par.Erev) * para[-1]
    sens_inf[-1] += (self.par.holding_potential - self.par.Erev) * \
        rhs_inf_eval[self.par.open_state]

    print("sens_inf calculated as {}".format(sens_inf))

    print('Done')
def build_functions(sympy_graph, variables, parameters=None, wrt=None,
                    include_obj=True, include_grad=False, include_hess=False,
                    func_options=None, grad_options=None, hess_options=None):
    """Build function, gradient, and Hessian callables of the sympy_graph.

    Parameters
    ----------
    sympy_graph : sympy.core.expr.Expr
        SymPy expression to compile,
        :math:`f(x) : \mathbb{R}^{n} \\rightarrow \mathbb{R}`,
        which corresponds to ``sympy_graph(variables+parameters)``.
    variables : List[sympy.core.symbol.Symbol]
        Free variables in the sympy_graph. By convention these are usually all
        instances of StateVariables.
    parameters : Optional[List[sympy.core.symbol.Symbol]]
        Free variables in the sympy_graph. These are typically external
        parameters that are controlled by the user.
    wrt : Optional[List[sympy.core.symbol.Symbol]]
        Variables to differentiate *with respect to* for the gradient and
        Hessian callables. If None, will fall back to ``variables``.
    include_obj : Optional[bool]
        Whether to build the sympy_graph callable,
        :math:`f(x) : \mathbb{R}^{n} \\rightarrow \mathbb{R}`
    include_grad : Optional[bool]
        Whether to build the gradient callable,
        :math:`\\pmb{g}(x) = \\nabla f(x) : \mathbb{R}^{n} \\rightarrow \mathbb{R}^{n}`
    include_hess : Optional[bool]
        Whether to build the Hessian callable,
        :math:`\mathbb{H}(x) = \\nabla^2 f(x) : \mathbb{R}^{n} \\rightarrow \mathbb{R}^{n \\times n}`
    func_options : Optional[Dict[str, str]]
        Options to pass to ``lambdify`` when compiling the function.
    grad_options : Optional[Dict[str, str]]
        Options to pass to ``lambdify`` when compiling the gradient.
    hess_options : Optional[Dict[str, str]]
        Options to pass to ``lambdify`` when compiling the Hessian.

    Returns
    -------
    BuildFunctionsResult

    Notes
    -----
    Default options for compiling the function, gradient and Hessian are
    defined by ``_get_lambdify_options``.
    """
    if wrt is None:
        wrt = sympify(tuple(variables))
    if parameters is None:
        parameters = []
    else:
        parameters = [wrap_symbol_symengine(p) for p in parameters]
    variables = tuple(variables)
    parameters = tuple(parameters)
    func, grad, hess = None, None, None
    # Replace complex infinity (zoo) with real infinity because SymEngine
    # cannot lambdify complex infinity. We also replace in the derivatives in
    # case some differentiation would produce a complex infinity. The
    # replacement is assumed to be cheap enough that it's safer to replace the
    # complex values and pay the minor time penalty.
    inp = sympify(variables + parameters)
    graph = sympify(sympy_graph).xreplace({zoo: oo})
    if include_obj:
        func = lambdify(inp, [graph], **_get_lambidfy_options(func_options))
    if include_grad or include_hess:
        grad_graphs = list(graph.diff(w).xreplace({zoo: oo}) for w in wrt)
        if include_grad:
            grad = lambdify(inp, grad_graphs, **_get_lambidfy_options(grad_options))
        if include_hess:
            hess_graphs = list(list(g.diff(w).xreplace({zoo: oo}) for w in wrt)
                               for g in grad_graphs)
            hess = lambdify(inp, hess_graphs, **_get_lambidfy_options(hess_options))
    return BuildFunctionsResult(func=func, grad=grad, hess=hess)
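# Standalone sketch of the objective/gradient compilation pattern used above,
# written directly against symengine because the module helpers
# (wrap_symbol_symengine, _get_lambidfy_options, BuildFunctionsResult) are not
# reproduced here. The objective f(x, y) below is a made-up example; zoo/oo
# are symengine's complex and real infinity, as used in the snippet.
import numpy as np
from symengine import symbols, lambdify, log, zoo, oo

x, y = symbols('x y')
wrt = (x, y)
obj = x * log(x) + (x - y)**2
obj = obj.xreplace({zoo: oo})  # guard against complex infinity before compiling

func = lambdify(wrt, [obj], backend='lambda')
grad = lambdify(wrt, [obj.diff(w).xreplace({zoo: oo}) for w in wrt], backend='lambda')

pt = np.array([0.5, 0.2])
print(func(pt))  # objective value at pt
print(grad(pt))  # gradient entries at pt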
def compute_sensitivity_equations_rhs(self, p, y, v, rhs, para):
    print('Creating RHS function...')

    # Inputs for RHS ODEs
    inputs = [(y[i]) for i in range(self.par.n_state_vars)]
    [inputs.append(p[j]) for j in range(self.par.n_params)]
    inputs.append(v)

    # Create RHS function
    frhs = [rhs[i] for i in range(self.par.n_state_vars)]
    self.func_rhs = se.lambdify(inputs, frhs)

    # Create Jacobian of the RHS function
    jrhs = [se.Matrix(rhs).jacobian(se.Matrix(y))]
    self.jfunc_rhs = se.lambdify(inputs, jrhs)

    print('Creating 1st order sensitivities function...')

    # Create symbols for 1st order sensitivities
    dydp = [[se.symbols('dy%d' % i + 'dp%d' % j) for j in range(self.par.n_params)]
            for i in range(self.par.n_state_vars)]

    # Append 1st order sensitivities to inputs
    for i in range(self.par.n_params):
        for j in range(self.par.n_state_vars):
            inputs.append(dydp[j][i])

    # Initialise 1st order sensitivities
    dS = [[0 for j in range(self.par.n_params)] for i in range(self.par.n_state_vars)]
    S = [[dydp[i][j] for j in range(self.par.n_params)] for i in range(self.par.n_state_vars)]

    # Create 1st order sensitivities function
    fS1, Ss = [], []
    for i in range(self.par.n_state_vars):
        for j in range(self.par.n_params):
            dS[i][j] = se.diff(rhs[i], p[j])
            for l in range(self.par.n_state_vars):
                dS[i][j] = dS[i][j] + se.diff(rhs[i], y[l]) * S[l][j]

    # Flatten 1st order sensitivities for function
    [[fS1.append(dS[i][j]) for i in range(self.par.n_state_vars)]
     for j in range(self.par.n_params)]
    [[Ss.append(S[i][j]) for i in range(self.par.n_state_vars)]
     for j in range(self.par.n_params)]

    self.func_S1 = se.lambdify(inputs, fS1)

    # Define number of 1st order sensitivities
    self.par.n_state_var_sensitivities = self.par.n_params * self.par.n_state_vars

    # Append 1st order sensitivities to initial conditions
    dydps = np.zeros((self.par.n_state_var_sensitivities))

    # Concatenate RHS and 1st order sensitivities
    Ss = np.concatenate((list(y), Ss))
    fS1 = np.concatenate((frhs, fS1))

    # Create Jacobian of the 1st order sensitivities function
    jS1 = [se.Matrix(fS1).jacobian(se.Matrix(Ss))]
    self.jfunc_S1 = se.lambdify(inputs, jS1)

    print('Getting ' + str(self.par.holding_potential) + ' mV steady state initial conditions...')

    # Set the initial conditions of the model and the initial sensitivities
    # by finding the steady state of the model

    # RHS
    # Can be found analytically
    rhs_inf = (-(self.A.inv()) * self.B).subs(v, self.par.holding_potential)
    self.rhs0 = [float(expr.evalf()) for expr in rhs_inf.subs(p, para)]

    # Steady state can be found analytically
    S1_inf = [float(se.diff(rhs_inf[i], p[j]).subs(p, para).evalf())
              for j in range(0, self.par.n_params)
              for i in range(0, self.par.n_state_vars)]
    self.drhs0 = np.concatenate((self.rhs0, S1_inf))

    print('Done')