def SPM_fun(I, c_n, c_p, param):
    """Single Particle Model terminal-voltage expression.

    Parameters
    ----------
    I : float or array
        Applied current.
    c_n, c_p : float or array
        Negative/positive electrode concentrations; assumed in (0, 1)
        so the sqrt terms are real — TODO confirm against caller.
    param : object
        Parameter container providing m_n, m_p, C_hat_n, C_hat_p,
        Lambda, L_n, L_p and T_0.

    Returns
    -------
    Open-circuit potential difference U_p - U_n minus the two
    arcsinh reaction overpotentials.
    """
    # Exchange terms g = m * C_hat * sqrt(c) * sqrt(1 - c); vanish at c = 0, 1.
    g_n = param.m_n * param.C_hat_n * np.sqrt(c_n) * np.sqrt(1 - c_n)
    g_p = param.m_p * param.C_hat_p * np.sqrt(c_p) * np.sqrt(1 - c_p)
    # BUG FIX: U_n was evaluated at the constant param.c_n instead of the
    # state argument c_n, asymmetric with U_p(c_p, ...) above and with the
    # use of c_n in g_n.
    result = (ocp.U_p(c_p, param.T_0, param)
              - ocp.U_n(c_n, param.T_0, param)
              - (2 / param.Lambda) * np.arcsinh(I / (g_p * param.L_p))
              - (2 / param.Lambda) * np.arcsinh(I / (g_n * param.L_n)))
    return result
def callback(self, wghts, step, g, gargs):
    """Callback function for optimization loop.

    Prints a banner on the first step, recomputes and logs the losses
    every 50 steps (console + 'logerr.out'), and returns the most recent
    PDE loss (1.0 on steps where the loss was not recomputed).

    wghts  -> current weights passed through to self.losses
    step   -> iteration counter
    g      -> gradient (unused here)
    gargs  -> auxiliary tuple; len(gargs[1]) appears to count the dOTD
              modes already solved — TODO confirm against caller
    """
    closs = 1.0  # default return when the loss is not recomputed this step
    if step == 0:
        print(' ------------------------- ')
        print('| Solving for dOTD mode {0:d} |'.format(len(gargs[1])+1))
        print(' ------------------------- ')
        # For the very first mode, start from a fresh logfile; later
        # modes append to the same file.
        if len(gargs[1]) == 0:
            try:
                os.remove('logerr.out')
            except OSError:
                pass  # logfile did not exist yet — nothing to remove
    if step % 50 == 0:
        closs, lyap = self.losses(wghts, step, gargs)
        logstring = ("Iteration {0:4d} \t"
                     + "Loss PDE = {1:0.12f} \t"
                     + "Lyap = {2:0.2e}")\
            .format(step, closs, -np.arcsinh(lyap))
        print(logstring)
        with open('logerr.out', "a") as text_file:  # Write to logfile
            text_file.write(logstring + "\n")
    return closs
def test_arcsinh():
    """Gradient checks for a scaled arcsinh (legacy check_grads signature)."""
    def scaled_asinh(x):
        return 3.0 * np.arcsinh(x)
    first_deriv = grad(scaled_asinh)
    # Check the function and its derivative at independent random points.
    check_grads(scaled_asinh, npr.randn())
    check_grads(first_deriv, npr.randn())
def test_arcsinh():
    """Check autograd gradients of 3*arcsinh at a random point."""
    def triple_asinh(z):
        return 3.0 * np.arcsinh(z)
    check_grads(triple_asinh)(npr.randn())
def test_arcsinh():
    """Verify gradients of a scaled inverse hyperbolic sine."""
    scaled = lambda t: np.arcsinh(t) * 3.0
    sample_point = npr.randn()
    check_grads(scaled)(sample_point)
def inv_sal(par, x):
    """Invert the 4-parameter (location/scale) sinh-arcsinh transform at x.

    par = [location, scale, tail, skew]; returns the pre-image of x under
    sal, i.e. sinh((arcsinh((x - par[0]) / par[1]) - par[3]) / par[2]).
    """
    standardized = (x - par[0]) / par[1]
    inner = (np.arcsinh(standardized) - par[3]) / par[2]
    return np.sinh(inner)
def inv_sa(par, x):
    """Invert the 2-parameter sinh-arcsinh transform at x.

    par = [tail, skew]; returns sinh((arcsinh(x) - par[1]) / par[0]).
    """
    inner = np.arcsinh(x) - par[1]
    return np.sinh(inner / par[0])
def sal(par, y):
    """Forward 4-parameter sinh-arcsinh transform with location and scale.

    par = [location, scale, tail, skew]; returns
    par[0] + par[1] * sinh(par[2] * arcsinh(y) + par[3]).
    """
    warped = np.sinh(par[2] * np.arcsinh(y) + par[3])
    return par[0] + par[1] * warped
def sa(par, y):
    """Forward 2-parameter sinh-arcsinh transform.

    par = [tail, skew]; returns sinh(par[0] * arcsinh(y) + par[1]).
    """
    inner = np.arcsinh(y) * par[0] + par[1]
    return np.sinh(inner)
def asinh(par, y):
    """Affine arcsinh transform of y.

    par = [offset, gain, shift, width]; returns
    par[0] + par[1] * arcsinh((y - par[2]) / par[3]).
    """
    standardized = (y - par[2]) / par[3]
    return par[0] + par[1] * np.arcsinh(standardized)