def test_ternary_candidate_models_are_constructed_correctly():
    """Candidate models should be generated for all valid combinations of possible models in the ternary case"""
    # Feature pools per property contribution (CPM/SM/HM forms); the tuples are
    # the allowed temperature-dependent basis functions for each property.
    features = OrderedDict([("CPM_FORM", (v.T * symengine.log(v.T), v.T**2)),
                            ("SM_FORM", (v.T, )),
                            ("HM_FORM", (symengine.S.One, ))])
    # YS is the symmetric site-fraction product; V_I/V_J/V_K are the ternary
    # asymmetry variables -- NOTE(review): names inferred from usage, confirm
    # against build_candidate_models' documentation.
    YS = symengine.Symbol('YS')
    V_I, V_J, V_K = symengine.Symbol('V_I'), symengine.Symbol(
        'V_J'), symengine.Symbol('V_K')
    candidate_models = build_candidate_models((('A', 'B', 'C'), 'A'), features)
    # For each property the candidates grow from the single symmetric term to
    # the full asymmetric expansion over V_I, V_J and V_K.
    assert candidate_models == OrderedDict([
        ('CPM_FORM', [
            [v.T * YS * symengine.log(v.T)],
            [v.T * YS * symengine.log(v.T), v.T**2 * YS],
            [
                v.T * V_I * YS * symengine.log(v.T),
                v.T * V_J * YS * symengine.log(v.T),
                v.T * V_K * YS * symengine.log(v.T)
            ],
            [
                v.T * V_I * YS * symengine.log(v.T), v.T**2 * V_I * YS,
                v.T * V_J * YS * symengine.log(v.T), v.T**2 * V_J * YS,
                v.T * V_K * YS * symengine.log(v.T), v.T**2 * V_K * YS
            ],
        ]),
        ('SM_FORM', [[v.T * YS],
                     [v.T * V_I * YS, v.T * V_J * YS, v.T * V_K * YS]]),
        ('HM_FORM', [[YS], [V_I * YS, V_J * YS, V_K * YS]])
    ])
def test_build_feature_sets_generates_desired_binary_features_for_cp_like():
    """Binary feature sets can be correctly generated for heat capacity-like features"""
    YS = symengine.Symbol("YS")
    Z = symengine.Symbol("Z")
    temperature_features = [v.T, v.T**2, 1 / v.T, v.T**3]
    interaction_features = [YS, YS * Z, YS * Z**2, YS * Z**3]
    feature_sets = build_feature_sets(temperature_features, interaction_features)
    # Total count of valid combinations for 4 x 4 candidate features.
    assert len(feature_sets) == 340
    # Smallest candidate: one symmetric temperature-dependent term.
    assert feature_sets[0] == [v.T * YS]
    assert feature_sets[5] == [v.T * YS, v.T * YS * Z, v.T**2 * YS * Z]
    # Largest candidate: full outer product of temperature features with
    # powers of Z (16 terms, Z-power-major order).
    full_feature_set = [
        temperature * YS * Z**power
        for power in range(4)
        for temperature in temperature_features
    ]
    assert feature_sets[-1] == full_feature_set
def test_substitute_helpers(self):
    """Helper definitions get substituted into the derivative expressions."""
    backend = NumbaBackend()
    sym_a = se.Symbol("a")
    sym_b = se.Symbol("b")
    sym_y = se.Symbol("y")
    helpers = [(sym_a, se.exp(-12 * sym_y))]
    derivatives = [-sym_b * sym_a + sym_y, sym_y**2]
    substituted = backend._substitute_helpers(derivatives, helpers)
    expected = [-sym_b * se.exp(-12 * sym_y) + sym_y, sym_y**2]
    self.assertListEqual(substituted, expected)
def test_binary_candidate_models_are_constructed_correctly():
    """Candidate models should be generated for all valid combinations of possible models in the binary case"""
    # Feature pools per property contribution; CPM has two temperature basis
    # functions, so its candidate list is the largest.
    features = OrderedDict([("CPM_FORM", (v.T*symengine.log(v.T), v.T**2)),
                            ("SM_FORM", (v.T,)),
                            ("HM_FORM", (symengine.S.One,))
                            ])
    # YS is the symmetric site-fraction product; Z is the binary interaction
    # (Redlich-Kister-like) ordering variable -- NOTE(review): names inferred
    # from usage, confirm against build_candidate_models' documentation.
    YS = symengine.Symbol('YS')
    Z = symengine.Symbol('Z')
    candidate_models = build_candidate_models((('A', 'B'), 'A'), features)
    # Candidates enumerate every valid subset: for each power of Z up to 3,
    # all combinations of the two CPM temperature features.
    assert candidate_models == OrderedDict([
        ('CPM_FORM', [
            [v.T*YS*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T)],
            [v.T*YS*symengine.log(v.T), v.T**2*YS, v.T*YS*Z*symengine.log(v.T), v.T**2*YS*Z, v.T*YS*Z**2*symengine.log(v.T), v.T**2*YS*Z**2, v.T*YS*Z**3*symengine.log(v.T), v.T**2*YS*Z**3]
        ]),
        ('SM_FORM', [
            [v.T*YS],
            [v.T*YS, v.T*YS*Z],
            [v.T*YS, v.T*YS*Z, v.T*YS*Z**2],
            [v.T*YS, v.T*YS*Z, v.T*YS*Z**2, v.T*YS*Z**3]
        ]),
        ('HM_FORM', [
            [YS],
            [YS, YS*Z],
            [YS, YS*Z, YS*Z**2],
            [YS, YS*Z, YS*Z**2, YS*Z**3]
        ])
    ])
def qubit_op_to_expr(qubit_op, angle_folds=0):
    """Translate a Pauli-string operator into a symbolic Bloch-angle expression.

    Each single-qubit factor is replaced by its mean-field expectation written
    in spherical angles ``phi<i>``/``the<i>``; depending on ``angle_folds``,
    extra discrete sign symbols (``Z``, ``Q``, ``W``) are multiplied in so the
    angle domain can be folded.

    Assumes ``qubit_op`` exposes ``compress()`` and a ``terms`` mapping of
    Pauli strings to coefficients -- TODO confirm (OpenFermion-style operator).
    """
    qubit_op.compress()
    expr = 0
    for pauli_string, coefficient in qubit_op.terms.items():
        term = coefficient
        for qubit_index, pauli in pauli_string:
            phi = se.Symbol('phi' + str(qubit_index))
            the = se.Symbol('the' + str(qubit_index))
            if pauli == 'X':
                term *= se.cos(phi) * se.sin(the)
                if angle_folds == 3:
                    term *= se.Symbol('W' + str(qubit_index))
            if pauli == 'Y':
                term *= se.sin(phi) * se.sin(the)
                if angle_folds > 1:
                    term *= se.Symbol('Q' + str(qubit_index))
            if pauli == 'Z':
                term *= se.cos(the)
                if angle_folds > 0:
                    term *= se.Symbol('Z' + str(qubit_index))
        expr += term
    return expr
def get_coherent_state(bloch_angles, n):
    """Build the 2**n product-state column vector from per-qubit Bloch angles.

    ``bloch_angles`` maps symengine symbols ``phi<i>``/``the<i>`` to numeric
    angles; a missing ``phi<i>`` defaults to 0, while ``the<i>`` must be
    present for every qubit.
    """
    single_qubit_states = []
    for qubit in range(n):
        try:
            phi = bloch_angles[se.Symbol('phi' + str(qubit))]
        except KeyError:
            # Azimuthal angle is optional; absent means no relative phase.
            phi = 0
        the = bloch_angles[se.Symbol('the' + str(qubit))]
        up = np.array([1, 0])
        down = np.array([0, 1])
        single_qubit_states.append(
            np.cos(the / 2) * up + np.exp(1j * phi) * np.sin(the / 2) * down
        )
    state = 1
    for psi in single_qubit_states:
        state = np.kron(state, psi)
    return np.reshape(state, (-1, 1))
def QMF(qubit_H, angle_folds, sampler, num_cycles=5, num_samples=1000,
        strength=1e3, verbose=False):
    """Minimize the mean-field energy of ``qubit_H`` and unfold the angles.

    The Hamiltonian is converted to a symbolic Bloch-angle expression and
    minimized with ``minimize_expr``; afterwards the discrete sign variables
    (W/Q/Z) returned by the sampler are folded back into the continuous
    angles so the result is expressed in the unfolded angle domain.

    Returns the minimized energy and the dict of continuous angle values.
    """
    n = count_qubits(qubit_H)  # NOTE(review): n is currently unused here
    expr = qubit_op_to_expr(qubit_H, angle_folds=angle_folds)
    QMF_energy, cont_dict, disc_dict = minimize_expr(expr, angle_folds, 0,
                                                     sampler,
                                                     max_cycles=num_cycles,
                                                     num_samples=num_samples,
                                                     strength=strength,
                                                     verbose=verbose)
    # Undo the angle folding: each discrete variable equal to -1 reflects the
    # corresponding continuous angle (mirrors the folding in qubit_op_to_expr).
    for key in cont_dict:
        num = str(key)[3:]  # symbol names are 'phi<i>' / 'the<i>'
        if str(key)[:3] == 'phi':
            if angle_folds == 3:
                try:
                    W = disc_dict[se.Symbol('W' + str(num))]
                    if W == -1:
                        cont_dict[key] = np.pi - cont_dict[key]
                except KeyError:
                    # Discrete variable absent from sampler output: leave as-is.
                    pass
            if angle_folds >= 2:
                try:
                    Q = disc_dict[se.Symbol('Q' + str(num))]
                    if Q == -1:
                        cont_dict[key] = 2 * np.pi - cont_dict[key]
                except KeyError:
                    pass
        elif str(key)[:3] == 'the':
            if angle_folds >= 1:
                try:
                    Z = disc_dict[se.Symbol('Z' + str(num))]
                    if Z == -1:
                        cont_dict[key] = np.pi - cont_dict[key]
                except KeyError:
                    pass
    return QMF_energy, cont_dict
def subs(self, parameter_map: Dict) -> "ParameterExpression":
    """Returns a new Expression with replacement Parameters.

    Args:
        parameter_map: Mapping from Parameters in self to the
            ParameterExpression instances with which they should be replaced.

    Raises:
        CircuitError:
            - If parameter_map contains Parameters outside those in self.
            - If the replacement Parameters in parameter_map would result in
              a name conflict in the generated expression.

    Returns:
        A new expression with the specified parameters replaced.
    """
    # Collect every parameter appearing in the replacement expressions so we
    # can validate them and build their backing symbols.
    inbound_parameters = set()
    inbound_names = {}
    for replacement_expr in parameter_map.values():
        for p in replacement_expr.parameters:
            inbound_parameters.add(p)
            inbound_names[p.name] = p

    self._raise_if_passed_unknown_parameters(parameter_map.keys())
    self._raise_if_parameter_names_conflict(inbound_names, parameter_map.keys())

    # Build symbols with whichever backend is available (symengine preferred).
    if _optionals.HAS_SYMENGINE:
        import symengine

        new_parameter_symbols = {
            p: symengine.Symbol(p.name) for p in inbound_parameters
        }
    else:
        from sympy import Symbol

        new_parameter_symbols = {
            p: Symbol(p.name) for p in inbound_parameters
        }

    # Include existing parameters in self not set to be replaced.
    new_parameter_symbols.update({
        p: s
        for p, s in self._parameter_symbols.items() if p not in parameter_map
    })

    # If new_param is an expr, we'll need to construct a matching sympy expr
    # but with our sympy symbols instead of theirs.
    symbol_map = {
        self._parameter_symbols[old_param]: new_param._symbol_expr
        for old_param, new_param in parameter_map.items()
    }

    substituted_symbol_expr = self._symbol_expr.subs(symbol_map)
    return ParameterExpression(new_parameter_symbols, substituted_symbol_expr)
def __init__(self, params, seed=None):
    """
    :param params: parameters of the neural mass
    :type params: dict
    :param seed: seed for random number generator
    :type seed: int|None
    """
    assert isinstance(params, dict)
    self.params = deepcopy(params)
    self.seed = seed

    # Position within the full system's state vector; assigned later.
    self.idx_state_var = None
    self.initialised = False

    # Symbols for helper variables and symbolic placeholders for Python
    # callback functions declared on the class.
    self.helper_symbols = {name: se.Symbol(name) for name in self.helper_variables}
    self.callback_functions = {name: se.Function(name) for name in self.python_callbacks}

    self._validate_params()
def test_numeric(self):
    """Numeric quadrature of t**2 over [0, 1] approximates 1/3 for each method."""
    t = symengine.Symbol("t")
    for method in ("gauss", "midpoint"):
        with self.subTest(method=method):
            approximation = quadrature(t**2, t, 0, 1, nsteps=100, method=method)
            self.assertAlmostEqual(float(approximation.n().real_part()), 1 / 3, places=3)
def __setstate__(self, state):
    """Restore a pickled Parameter by rebuilding its backing symbol from the stored name."""
    self._name = state["name"]
    if HAS_SYMENGINE:
        symbol = symengine.Symbol(self._name)
    else:
        # Fall back to sympy when symengine is unavailable.
        from sympy import Symbol

        symbol = Symbol(self._name)
    super().__init__(symbol_map={self: symbol}, expr=symbol)
def validate_parameters(self, parameter_values, use_cache=False):
    """Validate a dict or Series of parameter values

    Currently checks that all covariance matrices are posdef
    use_cache for using symengine cached matrices
    """
    # Build the symengine matrix cache on first cached call: one symbolic
    # matrix per multivariate distribution, keyed by its first random variable.
    if use_cache and not hasattr(self, '_cached_sigmas'):
        self._cached_sigmas = {}
        for rvs, dist in self.distributions():
            if len(rvs) > 1:
                sigma = dist.sigma
                a = [
                    [symengine.Symbol(e.name) for e in sigma.row(i)]
                    for i in range(sigma.rows)
                ]
                A = symengine.Matrix(a)
                self._cached_sigmas[rvs[0]] = A

    for rvs, dist in self.distributions():
        if len(rvs) > 1:
            if not use_cache:
                sigma = dist.sigma.subs(dict(parameter_values))
                # Switch to numpy here. Sympy posdef check is problematic
                # see https://github.com/sympy/sympy/issues/18955
                if not sigma.free_symbols:
                    a = np.array(sigma).astype(np.float64)
                    if not pharmpy.math.is_posdef(a):
                        return False
            else:
                sigma = self._cached_sigmas[rvs[0]]
                replacement = {}
                # Following because https://github.com/symengine/symengine/issues/1660
                for param in dict(parameter_values):
                    replacement[symengine.Symbol(param)] = parameter_values[param]
                sigma = sigma.subs(replacement)
                # Only check when fully numeric; remaining free symbols mean
                # some parameters are missing, so we cannot validate here.
                if not sigma.free_symbols:
                    a = np.array(sigma).astype(np.float64)
                    if not pharmpy.math.is_posdef(a):
                        return False
    return True
def test_comparison(self):
    """Spline interpolation of sin/cos matches the true functions on a dense grid."""
    lower, upper = -3, 10
    t = symengine.Symbol("t")
    spline = CubicHermiteSpline(n=2)
    spline.from_function(
        [symengine.sin(t), symengine.cos(t)],
        times_of_interest=(lower, upper),
        max_anchors=100,
    )
    sample_times = np.linspace(lower, upper, 100)
    interpolated = spline.get_state(sample_times)
    exact = np.vstack((np.sin(sample_times), np.cos(sample_times))).T
    assert_allclose(interpolated, exact, atol=0.01)
def test_simple_polynomial(self):
    """Extrema of a cubic anchored at its two stationary points are recovered."""
    T = symengine.Symbol("T")
    poly = 2 * T**3 - 3 * T**2 - 36 * T + 17
    arg_extremes = [-2, 3]

    def evaluate(expression, time):
        # Evaluate a symengine expression at a point as a 1-d array.
        return np.atleast_1d(float(expression.subs({T: time})))

    anchors = [
        (time, evaluate(poly, time), evaluate(poly.diff(T), time))
        for time in arg_extremes
    ]
    spline = CubicHermiteSpline(1, anchors)
    result = extrema_from_anchors(spline)
    assert_allclose(result.minima, evaluate(poly, arg_extremes[1]))
    assert_allclose(result.maxima, evaluate(poly, arg_extremes[0]))
    assert_allclose(result.arg_min, arg_extremes[1])
    assert_allclose(result.arg_max, arg_extremes[0])
def solve_chi_saddlepoint(mu, Sigma):
    """Compute the saddlepoint approximation for the generalized chi square
    distribution given a mean and a covariance matrix.

    Returns a callable approximating the normalized density, built from the
    cumulant generating function K(t) of the distribution and normalized by
    numerical integration over [0, inf).

    Currently has two different ways of solving: 1. If the mean is close to
    zero, the system can be solved symbolically.
    """
    eigenvalues, eigenvectors = np.linalg.eig(Sigma)
    # Use the identity rotation when Sigma is already diagonal, otherwise
    # rotate into the eigenbasis.
    if (eigenvectors == np.diag(eigenvalues)).all():
        P = np.eye(len(mu))
    else:
        P = eigenvectors.T
    Sigma_12 = np.linalg.cholesky(Sigma)
    b = P @ Sigma_12 @ mu

    x = sym.Symbol("x")
    t = sym.Symbol("t")

    # Cumulant generating function of the generalized chi-square distribution.
    K = 0
    for i, l in enumerate(eigenvalues):
        K += (t * b[i]**2 * l) / (1 - 2 * t * l) - 1 / 2 * sym.log(1 - 2 * l * t)
    Kp = sym.diff(K, t).simplify()
    Kpp = sym.diff(K, t, t).simplify()

    # Saddlepoint: solve K'(t) = x for t.
    roots = sym.lib.symengine_wrapper.solve(sym.Eq(Kp, x), t).args
    # BUG FIX: s_hat was previously unbound (NameError) when there were
    # multiple roots but none satisfied K''(s) >= 0. Default to the first
    # root; when several roots exist, prefer the last one with K'' >= 0
    # (a valid saddlepoint requires nonnegative curvature).
    s_hat = roots[0]
    if len(roots) > 1:
        for candidate in roots:
            trial = Kpp.subs(t, candidate).subs(x, np.dot(b, b))
            if trial >= 0.0:
                s_hat = candidate

    # Saddlepoint density (unnormalized), then normalize numerically.
    f = 1 / sym.sqrt(2 * sym.pi * Kpp.subs(
        t, s_hat)) * sym.exp(K.subs(t, s_hat) - s_hat * x)
    fp = sym.Lambdify(x, f.simplify())
    c = integrate.quad(fp, 0, np.inf)[0]
    return lambda x: 1 / c * fp(x)
def __init__(
    self,
    f,  # callable returning the drift expressions (iterable over components)
    g,  # callable returning the diffusion expressions
    y,  # initial state (sequence); defines the system dimension
    t=0.0,  # initial time
    f_helpers=(),
    g_helpers=(),
    control_pars=(),
    seed=None,
    additive=False,  # if True, g depends on time only (additive noise)
    do_cse=False,  # use common-subexpression elimination when lambdifying
):
    """Set up lambdified drift (f) and diffusion (g) callables for an SDE.

    Helper definitions are substituted in reverse order, the symbolic state
    y(i) is replaced by plain dummy symbols, and the resulting expressions
    are compiled with symengine's Lambdify (or LambdifyCSE).
    """
    self.state = y
    self.n = len(self.state)
    self.t = t
    self.parameters = []
    self.noises = []
    self.noise_index = None
    self.new_y = None
    self.new_t = None
    self.RNG = np.random.RandomState(seed)
    self.additive = additive

    # NOTE: from here on, t and y refer to jitcsde's symbolic time and state,
    # shadowing the numeric t and y arguments used above.
    from jitcsde import t, y
    f_subs = list(reversed(f_helpers))
    g_subs = list(reversed(g_helpers))
    lambda_args = [t]
    for i in range(self.n):
        symbol = symengine.Symbol("dummy_argument_%i" % i)
        lambda_args.append(symbol)
        f_subs.append((y(i), symbol))
        g_subs.append((y(i), symbol))
    lambda_args.extend(control_pars)
    # Substitute helpers/state and simplify each component expression.
    f_wc = list(
        ordered_subs(entry, f_subs).simplify(ratio=1) for entry in f())
    g_wc = list(
        ordered_subs(entry, g_subs).simplify(ratio=1) for entry in g())
    lambdify = symengine.LambdifyCSE if do_cse else symengine.Lambdify
    core_f = lambdify(lambda_args, f_wc)
    self.f = lambda t, Y: core_f(np.hstack([t, Y, self.parameters]))
    if self.additive:
        # Additive noise: diffusion is a function of time (and parameters) only.
        core_g = lambdify([t] + list(control_pars), g_wc)
        self.g = lambda t: core_g(np.hstack([t, self.parameters]))
    else:
        core_g = lambdify(lambda_args, g_wc)
        self.g = lambda t, Y: core_g(np.hstack([t, Y, self.parameters]))
def __init__(self, name: str):
    """Create a new named :class:`Parameter`.

    Args:
        name: name of the ``Parameter``, used for visual representation. This can
            be any unicode string, e.g. "ϕ".
    """
    self._name = name
    if HAS_SYMENGINE:
        symbol = symengine.Symbol(name)
    else:
        # Fall back to sympy when symengine is unavailable.
        from sympy import Symbol

        symbol = Symbol(name)
    super().__init__(symbol_map={self: symbol}, expr=symbol)
def _prepare_lambdas(self):
    """Lazily build the substitution list and argument symbols for lambdification."""
    if hasattr(self, "_lambda_subs") and hasattr(self, "_lambda_args"):
        # Already prepared on a previous call.
        return
    if self.helpers:
        warn(
            "Lambdification handles helpers by plugging them in. This may be very inefficient"
        )
    # Helpers are substituted in reverse definition order; each state
    # component y(i) is replaced by a plain dummy symbol.
    self._lambda_subs = list(reversed(self.helpers))
    self._lambda_args = [t]
    for i in range(self.n):
        dummy = symengine.Symbol("dummy_argument_%i" % i)
        self._lambda_args.append(dummy)
        self._lambda_subs.append((y(i), dummy))
    self._lambda_args.extend(self.control_pars)
def dict_to_func(dictionary):
    """Turn ``{tuple_of_symbol_names: coefficient}`` into a callable or constant.

    Each key is an iterable of symbol names; its term is the coefficient
    multiplied by the product of those symbols, and the terms are summed.

    Returns:
        The plain number itself when the sum contains no symbols, otherwise a
        symengine-lambdified function over the expression's free symbols
        (sorted with ``sort_disc_func``).
    """
    expr = 0
    for symbol_names, coefficient in dictionary.items():
        term = coefficient
        for name in symbol_names:
            term *= se.Symbol(name)
        expr += term
    # BUG FIX: the old check `type(expr) == float` missed purely integer
    # results (e.g. an empty dict yields 0), which then crashed on
    # `.free_symbols`. Treat any plain Python number as a constant.
    if isinstance(expr, (int, float)):
        return expr
    var_list = sorted(expr.free_symbols, key=sort_disc_func)
    return se.lambdify(var_list, (expr, ))
def var(self, name, shape, lower_bound, upper_bound, vtype, obj):
    """Add a variable to the model

    This method will create a matrix with dimensions `shape` that is filled
    with SymPy symbols of name `name_{i,j}`, where `i` and `j` are indices
    along the rows and columns, respectively. This is the return value.
    It will also create CPlex-structured variables for the names, upper- and
    lower bounds, types, and objective coefficients and add them to the CPlex
    model it is derived from.

    name        -- The name of the variable; will be indexed as "name_{i,j}"
    shape       -- A tuple with the dimension lengths [rows x columns]
    lower_bound -- The lower value limit a variable can take
    upper_bound -- The upper value limit a variable can take
    vtype       -- Type of the variable; 'I' integer, 'B' binary; 'C' count (I>=0)
    obj         -- Objective coefficient(s); scalar or array-like of matrix size

    return      -- A numpy matrix with shape `shape` filled with SymPy symbols
                   `name_{i,j}`

    Raises:
        ValueError: if a variable with this name was already added.
    """
    if name in self._shape:
        # BUG FIX: `raise ("...")` raised a plain string, which itself fails
        # with "exceptions must derive from BaseException"; raise a proper
        # exception type instead.
        raise ValueError("Variable with name {} already added".format(name))
    self._shape[name] = shape

    #TODO: this should reference species name, not index
    def ijstr(i, j):
        # Symbol name of the matrix entry at row i, column j.
        return sympy.Symbol(name + "_{" + str(int(i)) + "," + str(int(j)) + "}")

    matrix = np.array(np.fromfunction(np.vectorize(ijstr), shape))
    names = [str(e) for e in matrix.flatten().tolist()]

    # Register a running column index for each new variable name.
    nvars = len(self._idx)
    ntup = zip(names, range(nvars, nvars + len(names)))
    self._idx.update({k: v for k, v in ntup})

    var = {}
    var['names'] = names
    var['ub'] = [upper_bound] * matrix.size
    var['lb'] = [lower_bound] * matrix.size
    var['types'] = [vtype] * matrix.size
    if np.isscalar(obj):
        var['obj'] = [obj] * matrix.size
    else:
        var['obj'] = np.array(obj).flatten().tolist()
        #TODO: check if non-scalar args have the right length
    self.variables.add(**var)

    return np.matrix(matrix)
def init_node(self, **kwargs):
    """
    Initialise node and all the masses within.

    :kwargs: optional keyword arguments to init_mass
    """
    # The node's index must have been assigned before initialisation.
    assert self.index is not None
    # One symbol per sync variable, suffixed with this node's index.
    self.sync_symbols = {}
    for symbol in self.sync_variables:
        indexed_name = f"{symbol}_{self.index}"
        self.sync_symbols[indexed_name] = se.Symbol(indexed_name)
    for mass in self:
        mass.init_mass(**kwargs)
    assert all(mass.initialised for mass in self)
    self.initialised = True
def init_network(self, **kwargs):
    """
    Initialise network and the nodes within.

    :kwargs: optional keyword arguments to init_node
    """
    # One symbol per (sync variable, node index) pair; iteration order kept
    # sync-variable-major to match downstream expectations.
    self.sync_symbols = {}
    for symbol in self.sync_variables:
        for node_idx in range(self.num_nodes):
            indexed_name = f"{symbol}_{node_idx}"
            self.sync_symbols[indexed_name] = se.Symbol(indexed_name)
    for node_idx, node in enumerate(self.nodes):
        node.init_node(start_idx_for_noise=node_idx * node.num_noise_variables)
    assert all(node.initialised for node in self)
    self.initialised = True
def validate_parameters(self, parameter_values):
    """Validate a dict or Series of parameter values

    Currently checks that all covariance matrices are posdef
    """
    for rvs, dist in self.distributions():
        if len(rvs) > 1:
            sigma = rvs[0]._symengine_variance
            # Substitute via symengine Symbols, because of
            # https://github.com/symengine/symengine/issues/1660
            replacement = {}
            for param in dict(parameter_values):
                replacement[symengine.Symbol(
                    param)] = parameter_values[param]
            sigma = sigma.subs(replacement)
            # Only check when fully numeric; remaining free symbols mean
            # some parameters are missing, so we cannot validate here.
            if not sigma.free_symbols:
                a = np.array(sigma).astype(np.float64)
                if not pharmpy.math.is_posdef(a):
                    return False
    return True
def __init__(
    self,
    f_sym=(),  # symbolic derivative expressions (or generator thereof)
    *,
    helpers=None,
    wants_jacobian=False,
    n=None,  # system dimension; may be inferred from f_sym by _handle_input
    control_pars=(),
    callback_functions=(),
    verbose=True,
    module_location=None,  # path of a previously compiled module, if any
):
    """Store the symbolic system definition and prepare compilation state.

    When ``module_location`` points to an already compiled module,
    its derivative/Jacobian are loaded directly; otherwise the C sources
    are generated later on demand.
    """
    jitcxde.__init__(self, n, verbose, module_location)

    self.f_sym = self._handle_input(f_sym)
    # Flags tracking which C sources have been generated so far.
    self._f_C_source = False
    self._jac_C_source = False
    self._helper_C_source = False
    self.helpers = sort_helpers(sympify_helpers(helpers or []))
    self.control_pars = control_pars
    self.control_par_values = ()
    self.callback_functions = callback_functions
    self._wants_jacobian = wants_jacobian
    self.integrator = empty_integrator()

    if self.jitced is None:
        self._initialise = None
        self.f = None
        self.jac = None
    else:
        # Load derivative and Jacobian if a compiled module has been provided
        self._initialise = self.jitced.initialise
        self.f = self.jitced.f
        self.jac = self.jitced.jac if hasattr(self.jitced, "jac") else None

    self._number_of_jac_helpers = None
    self._number_of_f_helpers = None

    # Map each control parameter to a plain named symbol used in generated code.
    self.general_subs = {
        control_par: symengine.Symbol("parameter_" + control_par.name)
        for control_par in self.control_pars
    }
def _prepare_lambdas(self):
    """Lazily build the substitution list and argument symbols for lambdification.

    Raises:
        NotImplementedError: if callback functions are configured, since
            those require the C backend.
    """
    if self.callback_functions:
        raise NotImplementedError(
            "Callbacks do not work with lambdification. You must use the C backend."
        )
    if hasattr(self, "_lambda_subs") and hasattr(self, "_lambda_args"):
        # Already prepared on a previous call.
        return
    if self.helpers:
        warn(
            "Lambdification handles helpers by plugging them in. This may be very inefficient"
        )
    # Helpers are substituted in reverse definition order; each state
    # component y(i) is replaced by a plain dummy symbol.
    self._lambda_subs = list(reversed(self.helpers))
    self._lambda_args = [t]
    for i in range(self.n):
        dummy = symengine.Symbol("dummy_argument_%i" % i)
        self._lambda_args.append(dummy)
        self._lambda_subs.append((y(i), dummy))
    self._lambda_args.extend(self.control_pars)
def setUp(self):
    """Prepare sin and exp splines over a shared interval plus their evaluations."""
    lower, upper = -3, 2
    self.times = np.linspace(lower, upper, 10)
    t = symengine.Symbol("t")

    def build_spline(expression):
        # One-dimensional spline fitted to the given symbolic function.
        spline = CubicHermiteSpline(n=1)
        spline.from_function(
            [expression],
            times_of_interest=(lower, upper),
            max_anchors=100,
        )
        return spline

    self.sin_spline = build_spline(symengine.sin(t))
    self.sin_evaluation = self.sin_spline.get_state(self.times)
    self.exp_spline = build_spline(symengine.exp(t))
    self.exp_evaluation = self.exp_spline.get_state(self.times)
def point_estimate(self):
    """Maximum-likelihood point estimate of isomer proportions.

    Builds one symbolic proportion variable per isomer, constructs the
    negative log-likelihood function, maximizes the likelihood numerically
    and returns the best proportions.

    Raises:
        Exception: if the likelihood maximization did not succeed.
    """
    # One symengine symbol P<i> per isomer proportion.
    self.isomer_percents = [symengine.Symbol("P" + str(isomer_id))
                            for isomer_id in range(self.num_of_isomers)]
    logger.info("Generating the likelihood function .. ")
    self.pe_neg_loglike_function = self.get_neg_likelihood_of_iso_freq(
        within_isomer_ids=set(self.traversome.represent_for_isomers))\
        .loglike_func
    logger.info("Maximizing the likelihood function .. ")
    success_run = self.minimize_neg_likelihood(
        neg_loglike_func=self.pe_neg_loglike_function,
        num_variables=self.num_of_isomers,
        verbose=self.traversome.loglevel in ("TRACE", "ALL"))
    if success_run:
        self.pe_best_proportions, echo_prop = self.__summarize_run_prop(success_run)
        logger.info("Proportions: " +
                    ", ".join(["%s:%.4f" % (_id, _p) for _id, _p, in echo_prop.items()]))
        # success_run.fun is the minimized negative log-likelihood.
        logger.info("Log-likelihood: %s" % (-success_run.fun))
        return self.pe_best_proportions
    else:
        raise Exception("Likelihood maximization failed.")
def test_random_function(self):
    """solve() finds all times where a random polynomial component equals a value."""
    roots = np.sort(np.random.normal(size=5))
    value = np.random.normal()
    t = symengine.Symbol("t")
    # Polynomial with the sampled roots, shifted so it crosses `value` there.
    polynomial = np.prod([t - root for root in roots]) + value

    component = 1
    spline = CubicHermiteSpline(n=3)
    spline.from_function(
        [10, polynomial, 10],
        times_of_interest=(min(roots) - 0.01, max(roots) + 0.01),
        max_anchors=1000,
        tol=7,
    )

    solutions = spline.solve(i=component, value=value)
    solution_times = [time for time, _ in solutions]
    assert_allclose(spline.get_state(solution_times)[:, component], value)
    assert_allclose(solution_times, roots, atol=1e-3)
    # Reported derivatives must match the polynomial's true derivative.
    for time, derivative in solutions:
        expected_derivative = float(polynomial.diff(t).subs({t: time}))
        self.assertAlmostEqual(expected_derivative, derivative, places=5)
def kmc_test(dt, runner, ks):
    """Compare numerically estimated Kramers–Moyal coefficients with theory.

    For each index k still in ``ks``, checks how many bins of the numerical
    estimate bracket the theoretical value; indices that pass their threshold
    are removed from ``ks``. Returns the list of indices that still failed.

    Relies on ``scenario``, ``y``, ``KMC``, ``kmax``, ``nbins`` and
    ``thresholds`` from the enclosing scope.
    """
    Y = symengine.Symbol("Y")
    # Drift F and diffusion G of the scenario, rewritten in the plain symbol Y.
    F = scenario["F"].subs(y(0), Y)
    G = scenario["G"].subs(y(0), Y)
    Fd = F.diff(Y)
    Gd = G.diff(Y)
    Fdd = Fd.diff(Y)
    Gdd = Gd.diff(Y)

    # Theoretical expectation
    M = [
        symengine.Lambdify([Y], F + (F * Fd + G**2 * Fdd / 2) * dt / 2),
        symengine.Lambdify([Y], G**2 + (2 * F * (F + G * Gd) + G**2 *
                                        (2 * Fd + Gd**2 + G * Gdd)) * dt / 2),
        symengine.Lambdify([Y], 3 * G**2 * (F + G * Gd) * dt),
        symengine.Lambdify([Y], 3 * G**4 * dt),
        symengine.Lambdify([Y], 15 * G**4 * (F + 2 * G * Gd) * dt**2),
        symengine.Lambdify([Y], 15 * G**6 * dt**2)
    ]

    # Numerical estimate
    bins, *kmcs = KMC(runner(), dt, kmax=kmax, nbins=nbins)

    # Comparing the two
    i = 0
    while i < len(ks):
        k = ks[i]
        # Count bins whose error interval brackets the theoretical value.
        good = 0
        for X, value, error in zip(bins, *kmcs[k]):
            theory = M[k](X)
            if value - error < theory < value + error:
                good += 1
        if good < thresholds[k] * len(bins):
            # Below threshold: keep this index in ks and move on.
            i += 1
        else:
            ks.pop(i)
    return ks
def test_no_method(self):
    """Requesting an unknown quadrature method raises NotImplementedError."""
    sym_t = symengine.Symbol("t")
    with self.assertRaises(NotImplementedError):
        quadrature(sym_t**2, sym_t, 0, 1, method="tai")