def test_derivative_on_log():
    """Derivative of log(x) must equal 1/x for both AD methods."""
    points = np.r_[0.01, 0.1]
    expected = 1.0 / points
    for mode in ('forward', 'reverse'):
        differentiate = nd.Derivative(np.log, method=mode)
        assert_array_almost_equal(differentiate(points), expected)
def test_derivative_sin():
    """First derivative of sin, evaluated at many points, equals cos."""
    # Evaluate the indicated (default = first) derivative at multiple points.
    for mode in ('forward', 'reverse'):
        differentiate = nd.Derivative(np.sin, method=mode)
        grid = np.linspace(0, 2. * np.pi, 13)
        assert_allclose(differentiate(grid), np.cos(grid))
def test_derivative_on_sinh(self, x):
    """d/dx sinh(x) == cosh(x); a NaN input must propagate to a NaN result."""
    expected = np.cosh(x)
    # Only forward mode for now: # 'reverse']: # TODO: reverse fails
    for mode in ('forward',):
        differentiate = nd.Derivative(np.sinh, method=mode)
        result = differentiate(x)
        if np.isnan(expected):
            # Both sides NaN: compare NaN-ness rather than values.
            assert np.isnan(result) == np.isnan(expected)
        else:
            assert_allclose(result, expected)
def test_high_order_derivative_cos():
    """Higher derivatives of cos at pi/2 cycle through -1, 0, 1, 0."""
    true_vals = (-1.0, 0.0, 1.0, 0.0) * 5
    x = np.pi / 2  # np.linspace(0, np.pi/2, 15)
    for mode in ('forward', 'reverse'):
        # Reverse mode is only exercised for order 1 here.
        top = 15 if mode in ['forward'] else 2
        for order in range(1, top):
            differentiate = nd.Derivative(np.cos, n=order, method=mode)
            result = differentiate(x)
            assert_allclose(result, true_vals[order - 1], atol=1e-15)
def test_derivative_on_sinh(self, x):
    """d/dx sinh(x) == cosh(x); NaN inputs must yield NaN (unittest style)."""
    expected = np.cosh(x)
    # Only forward mode for now: # 'reverse']: # TODO: reverse fails
    for mode in ('forward',):
        differentiate = nd.Derivative(np.sinh, method=mode)
        result = differentiate(x)
        if np.isnan(expected):
            # Both sides NaN: compare NaN-ness rather than values.
            self.assertEqual(np.isnan(result), np.isnan(expected))
        else:
            self.assertAlmostEqual(result, expected)
def _a_levin(omega, f, g, d_g, x, s, basis, *args, **kwds):
    """Levin-type collocation step for an oscillatory integral.

    Builds and solves the Levin collocation system for the integrand
    f(t) * exp(1j * omega * g(t)) over [-1, 1], using the supplied
    ``basis`` object, and returns the boundary-evaluated antiderivative
    combination ``v[1]*exp(jw*g(1)) - v[0]*exp(jw*g(-1))``.

    Parameters
    ----------
    omega : oscillation frequency (multiplies 1j in the exponent).
    f, g, d_g : callables; d_g is presumably the derivative of g —
        TODO(review): confirm against the caller.
    x : collocation points (iterable; endpoints get multiplicity ``s``).
    s : derivative multiplicity imposed at the interval endpoints.
    basis : object providing ``derivative``, ``eval`` and call access.
    *args, **kwds : forwarded to f, g, d_g on every evaluation.
    """
    def psi(t, k):
        # Levin "collocation function": d_g(t) times the k-th basis function.
        return d_g(t, *args, **kwds) * basis(t, k)
    j_w = 1j * omega
    # Multiplicity of each collocation node: 1 in the interior, s at ends.
    nu = np.ones((len(x),), dtype=int)
    nu[0] = nu[-1] = s
    # S[r] = cumulative row offset for node r; S[-1] is wrapped to 0 so that
    # S[r - 1] at r == 0 indexes the (zeroed) last entry.
    S = np.cumsum(np.hstack((nu, 0)))
    S[-1] = 0
    n = int(S[-2])  # total number of collocation equations / unknowns
    a_matrix = np.zeros((n, n), dtype=complex)
    # NOTE(review): rhs is real-valued; assumes f returns real values here —
    # confirm if f may be complex.
    rhs = np.zeros((n,))
    # AD derivatives wrapped in Limit to handle endpoint singularities.
    dff = Limit(nda.Derivative(f))
    d_psi = Limit(nda.Derivative(psi))
    dbasis = basis.derivative
    for r, t in enumerate(x):
        for j in range(S[r - 1], S[r]):
            order = ((j - S[r - 1]) % nu[r])  # derivative order
            # Mutating .fun.n re-targets the AD object at this derivative
            # order before evaluating (side-channel configuration).
            dff.fun.n = order
            rhs[j] = dff(t, *args, **kwds)
            d_psi.fun.n = order
            for k in range(n):
                # Row j, column k of the Levin operator applied to basis k.
                a_matrix[j, k] = (dbasis(t, k, n=order + 1) +
                                  j_w * d_psi(t, k))
    k1 = np.flatnonzero(1 - np.isfinite(rhs))
    if k1.size > 0:  # Remove singularities
        warnings.warn('Singularities detected! ')
        a_matrix[k1] = 0
        rhs[k1] = 0
    # Least squares keeps the system solvable even when rows were zeroed.
    solution = linalg.lstsq(a_matrix, rhs)
    # Evaluate the collocation solution at the interval endpoints.
    v = basis.eval([-1, 1], solution[0])
    lim_g = Limit(g)
    g_b = np.exp(j_w * lim_g(1, *args, **kwds))
    if np.isnan(g_b):
        g_b = 0  # endpoint limit failed: drop that boundary contribution
    g_a = np.exp(j_w * lim_g(-1, *args, **kwds))
    if np.isnan(g_a):
        g_a = 0
    return v[1] * g_b - v[0] * g_a
def aLevinTQ(omega, ff, gg, dgg, x, s, basis, *args, **kwds):
    """Levin-type collocation step for an oscillatory integral.

    Older variant of the same algorithm as ``_a_levin`` (note it sets
    ``dff.f.n`` rather than ``dff.fun.n`` — presumably matching an older
    ``Limit`` attribute name; verify against the installed version).
    Returns ``v[1]*exp(jw*gg(1)) - v[0]*exp(jw*gg(-1))``.

    Parameters
    ----------
    omega : oscillation frequency (multiplies 1j in the exponent).
    ff, gg, dgg : callables; dgg is presumably the derivative of gg —
        TODO(review): confirm against the caller.
    x : collocation points (iterable; endpoints get multiplicity ``s``).
    s : derivative multiplicity imposed at the interval endpoints.
    basis : object providing ``derivative``, ``eval`` and call access.
    *args, **kwds : forwarded to ff, gg, dgg on every evaluation.
    """
    def Psi(t, k):
        # Levin "collocation function": dgg(t) times the k-th basis function.
        return dgg(t, *args, **kwds) * basis(t, k)
    j_w = 1j * omega
    # Multiplicity of each collocation node: 1 in the interior, s at ends.
    nu = np.ones((len(x), ), dtype=int)
    nu[0] = nu[-1] = s
    # S[r] = cumulative row offset for node r; S[-1] wrapped to 0 so that
    # S[r - 1] at r == 0 indexes the (zeroed) last entry.
    S = np.cumsum(np.hstack((nu, 0)))
    S[-1] = 0
    nn = int(S[-2])  # total number of collocation equations / unknowns
    A = np.zeros((nn, nn), dtype=complex)
    # NOTE(review): F is real-valued; assumes ff returns real values here —
    # confirm if ff may be complex.
    F = np.zeros((nn, ))
    # AD derivatives wrapped in Limit to handle endpoint singularities.
    dff = Limit(nda.Derivative(ff))
    dPsi = Limit(nda.Derivative(Psi))
    dbasis = basis.derivative
    for r, t in enumerate(x):
        for j in range(S[r - 1], S[r]):
            order = ((j - S[r - 1]) % nu[r])  # derivative order
            # Mutating .f.n re-targets the AD object at this derivative
            # order before evaluating (side-channel configuration).
            dff.f.n = order
            F[j] = dff(t, *args, **kwds)
            dPsi.f.n = order
            for k in range(nn):
                # Row j, column k of the Levin operator applied to basis k.
                A[j, k] = (dbasis(t, k, n=order + 1) + j_w * dPsi(t, k))
    k1 = np.flatnonzero(1 - np.isfinite(F))
    if k1.size > 0:  # Remove singularities
        warnings.warn('Singularities detected! ')
        A[k1] = 0
        F[k1] = 0
    # Least squares keeps the system solvable even when rows were zeroed.
    LS = linalg.lstsq(A, F)
    # Evaluate the collocation solution at the interval endpoints.
    v = basis.eval([-1, 1], LS[0])
    lim_gg = Limit(gg)
    gb = np.exp(j_w * lim_gg(1, *args, **kwds))
    if np.isnan(gb):
        gb = 0  # endpoint limit failed: drop that boundary contribution
    ga = np.exp(j_w * lim_gg(-1, *args, **kwds))
    if np.isnan(ga):
        ga = 0
    NR = (v[1] * gb - v[0] * ga)
    return NR
def test_derivative_cube():
    """Test for Issue 7"""
    def cube(t):
        return t * t * t

    shape = (3, 2)
    points = np.ones(shape) * 2
    for mode in ('forward', 'reverse'):
        differentiate = nd.Derivative(cube, method=mode)
        dx = differentiate(points)
        # The derivative must keep the input's shape.
        assert_allclose(list(dx.shape), list(shape), err_msg='Shape mismatch')
        txt = 'First differing element %d\n value = %g,\n true value = %g'
        expected = (3 * points ** 2).ravel()
        for i, (val, tval) in enumerate(zip(dx.ravel(), expected)):
            assert_allclose(val, tval, err_msg=txt % (i, val, tval))
def test_derivative_on_log(x):
    """Derivative of log at x must equal 1/x for both AD methods."""
    for mode in ('forward', 'reverse'):
        differentiate = nd.Derivative(np.log, method=mode)
        assert_allclose(differentiate(x), 1.0 / x)
def test_derivative_exp(x):
    """Derivative of exp at x equals exp(x) for both AD methods."""
    for mode in ('forward', 'reverse'):
        differentiate = nd.Derivative(np.exp, method=mode)
        assert_allclose(differentiate(x), np.exp(x))
])  # NOTE(review): closes a bracket opened before this view -- fragment is incomplete here
sys.exit()
# NOTE(review): everything below is unreachable scratch/experiment code
# (dead after sys.exit()) -- candidate for deletion once confirmed unused.
grad_fct = grad(sphere)
print(grad_fct([beta_syntetic[0], beta_syntetic[0], beta_syntetic[0]]))
sphere(beta_syntetic + epsilon)
print(1)
grad_fct = jacobian(mixedSchwefel)
print(grad_fct(beta_syntetic))
np.gradient([sphere], beta_syntetic)
print(2)
import numdifftools.nd_algopy as nda
import numdifftools as nd
fd = nda.Derivative(sphere)        # 1'st derivative
fd = nda.Jacobian(sphere)
fd(beta_syntetic)
np.allclose(fd(1), 2.7182818284590424)
True
# #bnds = list(zip(np.repeat(-500,Nparam),np.repeat(500,Nparam)))
#((0, None), (0, None))
#values=pd.DataFrame(np.zeros((Iter*N+1,N)),columns=np.arange(N),index=np.arange(Iter*N+1))
#
#target=TargetClass(dim=Nparam, minf=-500.0, maxf=500.0,target_function=target_function)
#x01=np.random.randint(-5,5,size=Nparam)
##print (x0[index])
def test_derivative_on_sinh(self):
    """d/dx sinh at 0 must equal cosh(0) (forward mode only)."""
    # Only forward mode for now: # 'reverse']: # TODO: reverse fails
    for mode in ('forward',):
        differentiate = nd.Derivative(np.sinh, method=mode)
        self.assertAlmostEqual(differentiate(0.0), np.cosh(0.0))
def test_derivative_exp():
    """Derivative of exp(x) at x == 0 equals exp(0) for both AD methods."""
    for mode in ('forward', 'reverse'):
        differentiate = nd.Derivative(np.exp, method=mode)
        assert_array_almost_equal(differentiate(0), np.exp(0), decimal=8)