def from_str(cls, s, varids, subvarids=None, pids=None, p0=None, polyid='',
             convarvals=None):
    """Construct a Polynomial from a string expression and attach its
    metadata (varids, subvarids, pids, p0, convarvals, polyid).
    """
    vars_ = sympy.symbols(varids)
    poly = Polynomial(s, vars_)
    #super(Polynomial, self).__init__(s, vars_)
    poly.varids = varids
    poly.subvarids = subvarids
    poly.pids = pids
    poly.p0 = butil.Series(p0, pids)
    if convarvals is None:
        convarvals = butil.Series([])
    poly.convarvals = convarvals
    poly.polyid = polyid
    return poly
def get_h(self, keep1=True):
    """
    c = h(p)
    """
    hs = [poly.get_h(keep1=keep1) for poly in self]
    yids = butil.flatten([h_.yids for h_ in hs])
    coefstrs = butil.flatten([h_.frepr.tolist() for h_ in hs])
    senstrs = [[exprmanip.simplify_expr(exprmanip.diff_expr(coefstr, pid))
                for pid in self.pids] for coefstr in coefstrs]

    hstr = str(coefstrs).replace("'", "")
    Dhstr = str(senstrs).replace("'", "")

    subs0 = dict(butil.flatten([poly.convarvals.items() for poly in self],
                               depth=1))

    def f(p):
        subs = subs0.copy()
        subs.update(dict(zip(self.pids, p)))
        return np.array(eval(hstr, subs))

    def Df(p):
        subs = subs0.copy()
        subs.update(dict(zip(self.pids, p)))
        return np.array(eval(Dhstr, subs))

    h = predict.Predict(f=f, Df=Df, pids=self.pids, p0=self.p0, yids=yids,
                        frepr=butil.Series(coefstrs, yids),
                        Dfrepr=butil.DF(senstrs, yids, self.pids))
    return h
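# A minimal, self-contained sketch (independent of this module's classes) of
# the trick get_h relies on: the coefficient expressions are kept as strings,
# str()-ed into one list literal with the quotes stripped, and then eval()-ed
# against a dict of parameter values to get a numeric vector in one shot.
# The names below (coefstrs, pids, p) are illustrative only.
import numpy as np

coefstrs = ['k1*k2', 'k1**2', '2*k1']     # symbolic coefficient strings
hstr = str(coefstrs).replace("'", "")     # "[k1*k2, k1**2, 2*k1]"

def f(p, pids=('k1', 'k2')):
    subs = dict(zip(pids, p))
    return np.array(eval(hstr, subs))     # evaluate all coefficients at once

print f([2.0, 3.0])                       # k1=2, k2=3 -> array([ 6.,  4.,  4.])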
def get_h(self, keep1=True):
    """
    Return the polynomial's coefficients as a prediction, c = h(p).
    """
    coefs = self.get_coefs()
    if not keep1:
        coefs = butil.get_submapping(coefs, f_value=lambda s: s != '1')
    subcoefids, subcoefstrs = coefs.keys(), coefs.values()
    yids = ['%s, %s, %s' % ((self.polyid,) + subcoefid)
            for subcoefid in subcoefids]
    senstrs = [[exprmanip.diff_expr(subcoefstr, pid) for pid in self.pids]
               for subcoefstr in subcoefstrs]

    hstr = str(subcoefstrs).replace("'", "")
    Dhstr = str(senstrs).replace("'", "")

    subs0 = self.convarvals.to_dict()

    def h(p):
        subs = subs0.copy()
        subs.update(dict(zip(self.pids, p)))
        return np.array(eval(hstr, subs))

    def Dh(p):
        subs = subs0.copy()
        subs.update(dict(zip(self.pids, p)))
        return np.array(eval(Dhstr, subs))

    h = predict.Predict(f=h, Df=Dh, pids=self.pids, p0=self.p0, yids=yids,
                        frepr=butil.Series(subcoefstrs, yids),
                        Dfrepr=butil.DF(senstrs, yids, self.pids))
    return h
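# A hedged sketch in plain sympy (not this class's get_coefs, whose exact
# return format is not shown here) of what "c = h(p)" means: each coefficient
# of the polynomial in the x-variables is an expression in the parameters,
# which can then be differentiated with respect to each pid to build Dh.
import sympy

k1, k2, X1, X2, X3 = sympy.symbols('k1 k2 X1 X2 X3')
expr = k1*k2*X1**2 + k1**2*X2*X3 + k2*X1*X2*X3

poly = sympy.Poly(expr, X1, X2, X3)
print poly.monoms()                        # exponent tuples of the x-monomials
print poly.coeffs()                        # coefficients, e.g. k1*k2, k2, k1**2
print [c.diff(k1) for c in poly.coeffs()]  # sensitivities of the coefficients to k1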
def get_s_integration(net, p=None, Tmin=None, Tmax=None, k=None, tol=None,
                      to_ser=False):
    """Return the steady-state values of the dynamic variables by integrating
    the network forward in time: integrate to a horizon, test whether
    |dx/dt| < tol, and if not converged restart from the endpoint on a
    k-times longer horizon, up to Tmax.
    """
    if p is not None:
        net.update_optimizable_vars(p)

    if Tmin is None:
        Tmin = TMIN
    if Tmax is None:
        Tmax = TMAX
    if k is None:
        k = K
    if tol is None:
        tol = TOL_SS

    nsp = len(net.dynamicVars)
    tmin, tmax = 0, Tmin
    x0 = net.x0.copy()
    constants = net.constantVarValues

    while tmax <= Tmax:
        # yp0 helps integration stability
        yp0 = Dynamics.find_ics(net.x0, net.x0, tmin,
                                net._dynamic_var_algebraic,
                                [1e-6] * nsp, [1e-6] * nsp,
                                constants, net)[1]
        # using daskr to save computational overhead
        out = daskr.daeint(res=net.res_function, t=[tmin, tmax],
                           y0=x0, yp0=yp0,
                           atol=[1e-6] * nsp, rtol=[1e-6] * nsp,
                           intermediate_output=False, rpar=constants,
                           max_steps=100000.0, max_timepoints=100000.0,
                           jac=net.ddaskr_jac)
        xt = out[0][-1]
        dxdt = net.res_function(tmax, xt, [0] * nsp, constants)
        if np.max(np.abs(dxdt)) < tol:
            net.updateVariablesFromDynamicVars(xt, tmax)
            net.t = tmax
            if to_ser:
                return butil.Series(xt, net.xids)
            else:
                return xt
        else:
            tmin, tmax = tmax, tmax * k
            x0 = xt

    raise Exception("Cannot reach steady state for p=%s" % p)
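# A minimal, runnable sketch (toy ODE, not this module's network objects) of
# the strategy used above: integrate to a horizon, check whether |dx/dt| has
# fallen below tol, and if not, restart from the endpoint on a k-times longer
# horizon until Tmax is exceeded.  All names here are illustrative.
import numpy as np
from scipy.integrate import odeint

def dxdt(x, t):
    # toy relaxation toward the steady state x = 1
    return 1.0 - x

def get_s_toy(x0, Tmin=1.0, Tmax=1e6, k=10.0, tol=1e-9):
    tmin, tmax = 0.0, Tmin
    x = np.asarray(x0, dtype=float)
    while tmax <= Tmax:
        x = odeint(dxdt, x, [tmin, tmax])[-1]
        if np.max(np.abs(dxdt(x, tmax))) < tol:
            return x
        tmin, tmax = tmax, tmax * k
    raise Exception("Cannot reach steady state")

print get_s_toy([0.0])   # -> array close to [ 1.]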
def __init__(self, msrmts, order_varid=None):
    """
    Three levels of information:
        - Coarsest level: varids, times
        - Intermediate level: varid2times
        - Finest level: msrmts

    All levels are useful:
        - Coarse level for guiding integration and quick inspection
        - Intermediate level for speedy querying of integration results
        - Finest level for generating yids

    Input:
        order_varid: a list of varids specifying the order of varids
            (as expts can be scrambled)
    """
    varids, times = zip(*msrmts)
    if order_varid is not None:
        varids = [varid for varid in order_varid if varid in varids]
    else:
        varids = butil.Series(varids).drop_duplicates().tolist()
    times = sorted(set(times))

    varid2times = OD()
    for varid in varids:
        varid2times[varid] = sorted(
            [time_ for varid_, time_ in msrmts if varid_ == varid])

    msrmts = []
    for varid, times_ in varid2times.items():
        msrmts.extend([(varid, time) for time in times_])

    list.__init__(self, msrmts)

    self.varids = varids
    self.times = times
    self.varid2times = varid2times
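# A small, standalone illustration (no dependence on this class) of the three
# levels described in the docstring, starting from a scrambled measurement
# list; the variable names and values are made up.
from collections import OrderedDict as OD

msrmts = [('X2', 5.0), ('X1', 1.0), ('X1', 5.0), ('X2', 1.0)]

varids = []                      # coarsest level: unique varids, first-seen order
for varid, _ in msrmts:
    if varid not in varids:
        varids.append(varid)
times = sorted(set(t for _, t in msrmts))

varid2times = OD((varid, sorted(t for v, t in msrmts if v == varid))
                 for varid in varids)                             # intermediate level

msrmts_sorted = [(v, t) for v in varids for t in varid2times[v]]  # finest level

print varids           # ['X2', 'X1']
print times            # [1.0, 5.0]
print varid2times      # OrderedDict([('X2', [1.0, 5.0]), ('X1', [1.0, 5.0])])
print msrmts_sorted    # [('X2', 1.0), ('X2', 5.0), ('X1', 1.0), ('X1', 5.0)]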
def get_s_rootfinding(net, p=None, x0=None, tol=None, ntrial=3, seeds=None,
                      test_stability=True, full_output=False, to_ser=False,
                      **kwargs_fsolve):
    """Return the steady-state values of the dynamic variables found by
    root-finding, which may or may not represent the true steady state.

    It may be time-consuming the first time it is called, as attributes
    like P are calculated and cached.

    Input:
        p:
        x0: initial guess in root-finding; by default net.x0
        to_ser:
        kwargs_fsolve:

    Documentation of scipy.optimize.fsolve:
    """
    if p is not None:
        net.update_optimizable_vars(p)

    if tol is None:
        tol = TOL_SS
    if ntrial is None:
        ntrial = NTRIAL

    x = np.array([var.value for var in net.dynamicVars])
    if np.max(np.abs(net.get_dxdt(x=x))) < tol:  # already at steady state
        if full_output:
            return x, {}, 1, ""
        else:
            return x

    if not hasattr(net, 'pool_mul_mat'):
        print "net has no P: calculating P."
        P = net.P
    P = net.P.values
    npool = P.shape[0]
    if npool > 0:
        poolsizes = np.dot(P, [var.initialValue for var in net.dynamicVars])

    # Indices of independent dynamic variables
    ixidxs = [net.xids.index(xid) for xid in net.ixids]

    def _f(x):
        """Function to be passed to scipy.optimize.fsolve: takes the values
        of all dynamic variables (x) as input and outputs the time-derivatives
        of the independent dynamic variables (dxi/dt) stacked with the
        differences between the current pool sizes (as determined by x) and
        the correct pool sizes.
        """
        dxdt = net.get_dxdt(x=x)
        if npool > 0:
            dxidt = dxdt[ixidxs]
            diffs = np.dot(P, x) - poolsizes
            return np.concatenate((dxidt, diffs))
        else:
            return dxdt

    def _Df(x):
        """Jacobian of _f: derivatives of the independent dxi/dt with respect
        to x, stacked with the pool matrix P.
        """
        dfidx = net.dres_dc_function(0, x, [0] * len(x),
                                     net.constantVarValues)[ixidxs]
        if npool > 0:
            return np.concatenate((dfidx, P))
        else:
            return dfidx

    if x0 is None:
        x0 = net.x0
    if tol is None:
        tol = 1.49012e-08  # scipy default

    out = sp.optimize.fsolve(_f, x0, fprime=_Df, xtol=tol, full_output=1,
                             **kwargs_fsolve)
    count = 1
    while out[2] != 1 and count <= ntrial:
        count += 1
        if seeds is None:
            seed = count
        else:
            seed = seeds.pop(0)
        out = sp.optimize.fsolve(_f, butil.Series(x0).randomize(seed=seed),
                                 fprime=_Df, xtol=tol, full_output=1,
                                 **kwargs_fsolve)

    s = out[0]
    net.update(x=s, t_x=np.inf)

    if test_stability:
        jac = net.get_jac_mat()
        # stability requires all eigenvalues to have negative real part
        if np.any(np.real(np.linalg.eigvals(jac)) > 0):
            print "Warning: the solution is an unstable steady state."

    if to_ser:
        s = butil.Series(s, net.xids)
    # fsolve returns a tuple; rebuild it so the first element is s
    out = (s,) + tuple(out[1:])

    if full_output:
        return out
    else:
        return s
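# A minimal, runnable sketch (toy two-species system, not this module's
# network API) of the root-finding formulation above: stack the time
# derivative of the independent variable with the pool-size (conservation)
# residual and hand the combined function to scipy.optimize.fsolve.
import numpy as np
import scipy.optimize

k_f, k_r, total = 2.0, 1.0, 3.0      # A <-> B with conservation A + B = total

def _f_toy(x):
    a, b = x
    dadt = k_r * b - k_f * a         # dxi/dt of the independent variable
    pool = a + b - total             # conservation residual (P.dot(x) - poolsize)
    return np.array([dadt, pool])

s, info, ier, mesg = scipy.optimize.fsolve(_f_toy, x0=[1.0, 1.0], full_output=1)
print s, ier                         # -> [ 1.  2.] 1  (a = total*k_r/(k_f+k_r))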
        self.update(p=p)
        return np.array(eval(str_h, self.varvals.to_dict()))

    def Dh(p):
        self.update(p=p)
        return np.array(eval(str_Dh, self.varvals.to_dict()))

    coefs = predict.Predict(f=h, Df=Dh, pids=self.pids, p0=self.p0,
                            yids=yids, funcform=coefs_r)
    return coefs


if __name__ == '__main__':
    polystr1 = 'k1*k2*r*X1**2 + k1**2*X2*X3 + X3*k2*X1*X2'
    polystr2 = 'k1**2*r*X2**2 + 2*k1**2*X1*X3'

    pids = ['k1', 'k2', 'Vf1']
    varids = ['X1', 'X2', 'RuBP']
    p0 = [1, 2, 3]

    poly1 = Polynomial.from_str(polystr1, varids, pids=pids, p0=p0,
                                polyid='X1', subvarids=['r'],
                                convarvals=butil.Series([1], ['X3']))
    poly2 = Polynomial.from_str(polystr2, varids, pids=pids, p0=p0,
                                polyid='X1', subvarids=['r'],
                                convarvals=butil.Series([1], ['X3']))

    polys = Polynomials([poly1, poly2])