def _get_smat(p, scheme, cutoff_singval, stepscale, temperature):
    # Dfunc, func, _hess2smat and Matrix are defined in the enclosing scope.
    if scheme == 'jtj':
        jac = Dfunc(p)
        jtj = jac.T * jac
        smat = _hess2smat(jtj, cutoff_singval, stepscale, temperature)
    elif scheme == 'eye':
        smat = Matrix.eye(func.pids) * stepscale
    else:
        raise ValueError("unrecognized scheme: %s" % scheme)
    return smat
def _get_smat(p, scheme, cutoff_singval, stepscale, temperature):
    # Variant computing J.T * J with np.dot (jac as a plain ndarray).
    if scheme == 'jtj':
        jac = Dfunc(p)
        jtj = np.dot(jac.T, jac)
        smat = _hess2smat(jtj, cutoff_singval, stepscale, temperature)
    elif scheme == 'eye':
        smat = Matrix.eye(func.pids) * stepscale
    else:
        raise ValueError("unrecognized scheme: %s" % scheme)
    return smat
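# A minimal sketch in plain NumPy of what the 'jtj' scheme above plausibly
# computes. _hess2smat_sketch and its scaling convention are assumptions, not
# the package's actual _hess2smat: it builds step directions along the
# eigenvectors of J.T * J, with lengths ~ 1/sqrt(eigenvalue), and floors small
# eigenvalues at cutoff_singval so the sloppy directions do not blow up.
import numpy as np

def _hess2smat_sketch(jtj, cutoff_singval, stepscale, temperature):
    evals, evecs = np.linalg.eigh(jtj)          # J.T * J is symmetric PSD
    evals = np.maximum(evals, cutoff_singval)   # floor the sloppy directions
    smat = evecs / np.sqrt(evals)               # column i scaled by evals[i]**-0.5
    return smat * stepscale * np.sqrt(temperature)

def _demo_smat():
    # for jtj = I the step matrix reduces to a scaled identity
    smat = _hess2smat_sketch(np.eye(3), 1e-9, 0.1, 1.0)
    assert np.allclose(smat, 0.1 * np.eye(3))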
def get_flux_ctrl_mat(net, p=None, normed=False):
    """Compute the flux control matrix C^J = I + Es * Cs at steady state;
    if normed, normalize it by the steady-state fluxes J and rates v.
    """
    net.update(p=p, t=np.inf)
    I, Es, Cs = Matrix.eye(net.Jids, net.vids), net.Es, net.Cs
    CJ = I + (Es * Cs).ch_rowvarids(net.Jids)
    if normed:
        return CJ.normalize(net.J, net.v)
    else:
        return CJ
def get_flux_ctrl_mat(net, p=None, normed=False):
    """Compute the flux control matrix C^J = I + Es * Cs at steady state;
    if normed, normalize it by the steady-state fluxes J and rates v.
    """
    net.update(p=p, t=np.inf)
    I, Es, Cs = Matrix.eye(net.fluxids, net.rateids), net.Es, net.Cs
    CJ = I + (Es * Cs).ch_rowvarids(net.fluxids)
    if normed:
        return CJ.normalize(net.J, net.v)
    else:
        return CJ
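# A toy check in plain NumPy of the MCA identity assembled above,
# CJ = I + Es * Cs, independent of the Matrix wrapper. The two-reaction
# pathway X0 -> S -> X1 with v1 = k1*X0 - k1r*S and v2 = k2*S is an
# illustration only; Cs comes from the standard relation
# Cs = -L * (Nr * Es * L).I * Nr (here L = I, no conserved moieties).
import numpy as np

def _demo_flux_ctrl_mat():
    k1r, k2 = 0.5, 1.0
    N = np.array([[1.0, -1.0]])        # stoichiometry of S; full rank, L = I
    Es = np.array([[-k1r], [k2]])      # dv/dS at steady state
    Cs = -np.linalg.inv(N @ Es) @ N    # concentration control matrix
    CJ = np.eye(2) + Es @ Cs           # flux control matrix
    # at steady state v1 = v2 = J, so normalized and unnormalized coefficients
    # coincide and the summation theorem gives row sums of 1
    assert np.allclose(CJ.sum(axis=1), 1.0)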
def get_link_mat(net):
    """Assemble the link matrix L from the reduced link matrix L0:

        L = [I ]
            [L0]

    so that N = L * Nr.
    """
    I = Matrix.eye(net.ixids)
    L0 = get_reduced_link_mat(net)
    L = Matrix(pd.concat((I, L0)))
    return L
def get_link_mat(net):
    """Assemble the link matrix L:

        L = [I ]
            [L0]

    so that N = L * Nr. If there are no dependent species (all species
    are independent), L is just the identity.
    """
    I = Matrix.eye(net.ixids)
    if len(net.ixids) == len(net.xids):
        L = I
    else:
        L0 = -net.P.loc[:, net.ixids].ch_rowvarids(net.dxids)
        L = Matrix(pd.concat((I, L0)))
    return L
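# A toy check of N = L * Nr for the simplest network with a conserved moiety,
# A <-> B (A + B constant): one independent species (A) and one dependent
# species (B). Plain NumPy; the matrices are illustrative, not drawn from any
# particular net object.
import numpy as np

def _demo_link_mat():
    N = np.array([[-1.0],              # dA/dt
                  [ 1.0]])             # dB/dt
    Nr = N[:1, :]                      # reduced stoichiometry (independent rows)
    L0 = np.array([[-1.0]])            # dependent row in terms of independent ones
    L = np.vstack([np.eye(1), L0])     # L = [I; L0]
    assert np.allclose(L @ Nr, N)      # N = L * Nr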
def fit_lm_custom(self, p0=None, in_logp=True, maxnstep=1000,
                  ret_full=False, ret_steps=False, disp=False,
                  lamb0=0.001, tol=1e-6, k_up=10, k_down=10, ndone=5,
                  **kwargs):
    """
    Input:
        k_up and k_down: parameters used in tuning lamb at each step;
            in the traditional scheme, typically k_up = k_down = 10;
            in the delayed gratification scheme, typically k_up = 2,
            k_down = 10 (see [1])

    grad C = J.T * r

    SVD of the Jacobian: J = U * S * V.T, where
        V.T * V = V * V.T = I
        U.T * U = I != U * U.T
    so that
        J.T * J = (V * S * U.T) * (U * S * V.T) = V * S^2 * V.T

    Gradient-descent step:
        deltap = -grad C = -J.T * r
    Gauss-Newton step:
        deltap = -(J.T * J).I * grad C = -(J.T * J).I * J.T * r
    Levenberg-Marquardt step:
        deltap = -(J.T * J + lamb * I).I * grad C
               = -(V * (S^2 + lamb * I) * V.T).I * J.T * r
               = -(V * (S^2 + lamb * I).I * V.T) * V * S * U.T * r
               = -V * (S^2 + lamb * I).I * S * U.T * r

    References:
    [1] Transtrum
    [2] Numerical Recipes
    """
    if p0 is None:
        p0 = self.p0
    else:
        p0 = Series(p0, self.pids)

    if in_logp:
        res = self.get_in_logp()
        p0 = p0.log()
    else:
        res = self

    if maxnstep is None:
        maxnstep = len(res.pids) * 100

    nstep = 0
    nfcall = 0
    nDfcall = 0

    p = p0
    lamb = lamb0
    done = 0
    accept = True
    convergence = False

    r = res(p0)
    cost = _r2cost(r)
    nfcall += 1

    if ret_steps:
        ps = DF([p0], columns=res.pids)
        deltaps = DF(columns=res.pids)
        costs = Series([cost], name='cost')
        lambs = Series([lamb], name='lamb')
        ps.index.name = 'step'
        costs.index.name = 'step'
        lambs.index.name = 'step'

    while not convergence and nstep < maxnstep:
        if accept:
            # recompute the Jacobian and its SVD only after an accepted step
            jac = res.Dr(p)
            U, S, Vt = jac.svd(to_mat=True)
            nDfcall += 1

        deltap = -Vt.T * (S**2 + lamb * Matrix.eye(res.pids)).I * S * U.T * r
        deltap = deltap[0]  # convert 1-d DF to Series
        p2 = p + deltap
        nstep += 1

        r2 = res(p2)
        cost2 = _r2cost(r2)
        nfcall += 1

        if np.abs(cost - cost2) < max(tol, cost * tol):
            done += 1

        if cost2 < cost:
            accept = True
            lamb /= k_down
            p = p2
            r = r2
            cost = cost2
        else:
            accept = False
            lamb *= k_up

        if ret_steps:
            ps.loc[ps.nrow] = p
            deltaps.loc[deltaps.nrow] = deltap
            costs.loc[costs.size] = cost
            lambs.loc[lambs.size] = lamb

        if done == ndone:
            convergence = True
            # lamb = 0

    if in_logp:
        p = p.exp()
        if ret_steps:
            ps = np.exp(ps)
            ps.columns = self.pids

    out = Series(OD([('p', p), ('cost', cost)]))
    if ret_full:
        out.nfcall = nfcall
        out.nDfcall = nDfcall
        out.convergence = convergence
        out.nstep = nstep
    if ret_steps:
        out.ps = ps
        out.deltaps = deltaps
        out.costs = costs
        out.lambs = lambs
    return out
def fit_lm_custom(res, p0=None, in_logp=True, maxnstep=1000, disp=False,
                  #ret_full=False, ret_steps=False,
                  lamb0=1e-3, tol=1e-6, k_up=10, k_down=10, ndone=5,
                  **kwargs):
    """
    Input:
        k_up and k_down: parameters used in tuning lamb at each step;
            in the traditional scheme, typically k_up = k_down = 10;
            in the delayed gratification scheme, typically k_up = 2,
            k_down = 10 (see [1])

    grad C = J.T * r

    SVD of the Jacobian: J = U * S * V.T, where
        V.T * V = V * V.T = I
        U.T * U = I != U * U.T
    so that
        J.T * J = (V * S * U.T) * (U * S * V.T) = V * S^2 * V.T

    Gradient-descent step:
        deltap = -grad C = -J.T * r
    Gauss-Newton step:
        deltap = -(J.T * J).I * grad C = -(J.T * J).I * J.T * r
    Levenberg-Marquardt step:
        deltap = -(J.T * J + lamb * I).I * grad C
               = -(V * (S^2 + lamb * I) * V.T).I * J.T * r
               = -(V * (S^2 + lamb * I).I * V.T) * V * S * U.T * r
               = -V * (S^2 + lamb * I).I * S * U.T * r

    References:
    [1] Transtrum
    [2] Numerical Recipes
    """
    if p0 is None:
        p0 = res.p0
    else:
        p0 = Series(p0, res.pids)

    if in_logp:
        res = res.get_in_logp()
        p0 = p0.log()

    if maxnstep is None:
        maxnstep = len(res.pids) * 100

    nstep = 0
    nfcall = 0
    nDfcall = 0

    p = p0
    lamb = lamb0
    done = 0
    accept = True
    convergence = False

    r = res(p0)
    cost = _r2cost(r)
    nfcall += 1

    ps = [p0]
    deltaps = []
    costs = [cost]
    lambs = [lamb]

    while not convergence and nstep < maxnstep:
        if accept:
            ## FIXME ***
            jac = res.Dr(p, to_mat=True)
            U, S, Vt = jac.svd(to_mat=True)
            nDfcall += 1

        deltap = -Vt.T * (S**2 + lamb * Matrix.eye(res.pids)).I * S * U.T * r
        deltap = deltap[0]  # convert 1-d DF to Series
        p2 = p + deltap
        nstep += 1

        if disp:
            #print nstep
            print(deltap.exp()[:10])
            print(lamb)
            #print p2
            #from util import butil
            #butil.set_global(p=p, deltap=deltap, p2=p2, nstep=nstep)

        r2 = res(p2)
        cost2 = _r2cost(r2)
        nfcall += 1

        if np.abs(cost - cost2) < max(tol, cost * tol):
            done += 1

        if cost2 < cost:
            accept = True
            lamb /= k_down
            p = p2
            r = r2
            cost = cost2
        else:
            accept = False
            lamb *= k_up

        ps.append(p)
        deltaps.append(deltap)
        costs.append(cost)
        lambs.append(lamb)

        if done == ndone:
            convergence = True
            # lamb = 0

    if in_logp:
        ps = np.exp(ps)
        # strip the 'log_' prefix (str.lstrip would drop any leading
        # characters from the set {l, o, g, _}, not the prefix)
        pids = [pid[len('log_'):] if pid.startswith('log_') else pid
                for pid in res.pids]
    else:
        pids = res.pids

    ## need to calculate cov FIXME ***
    fit = Fit(costs=costs, ps=ps, pids=pids, lambs=lambs,
              nfcall=nfcall, nDfcall=nDfcall,
              convergence=convergence, nstep=nstep)
    return fit
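# A standalone NumPy check of the SVD identity the docstring derives and the
# loop above uses (all names here are local to the sketch):
#     -(J.T * J + lamb * I).I * J.T * r  ==  -V * (S^2 + lamb * I).I * S * U.T * r
import numpy as np

def _demo_lm_step():
    rng = np.random.default_rng(0)
    J = rng.standard_normal((20, 5))   # Jacobian of residuals, m x n
    r = rng.standard_normal(20)        # residual vector
    lamb = 1e-3
    step_direct = -np.linalg.solve(J.T @ J + lamb * np.eye(5), J.T @ r)
    U, s, Vt = np.linalg.svd(J, full_matrices=False)
    step_svd = -Vt.T @ ((s / (s ** 2 + lamb)) * (U.T @ r))
    assert np.allclose(step_direct, step_svd)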