def test_differentiate_args_issues():
    def f(x, y):
        f1 = sin(3 * (x**2)) + x * tan(sqrt(y * 7))
        f2 = y**(3 * x) - sin(x)
        return f1, f2

    dfdx = differentiate(f)

    # test no args
    with pytest.raises(KeyError):
        dfdx()
    # test mixing positional and keyword args
    with pytest.raises(KeyError):
        dfdx(4, x=2)
    # test incorrect kwargs key
    with pytest.raises(KeyError):
        dfdx(x=2, z=2)
    # test too few keys
    with pytest.raises(KeyError):
        dfdx(x=2)
    # test too many keys
    with pytest.raises(KeyError):
        dfdx(x=2, y=3, z=3)
def test_differentiate_scalar_function_multiple_inputs():
    def f(x, y):
        return sin(3 * (x**2)) + x * tan(sqrt(y * 7))

    dfdx = differentiate(f)
    assert np.allclose(dfdx(x=4, y=2), np.array([-14.6792, 5.49340]))
    assert np.allclose(
        dfdx(x=np.array([1, 2, 3]), y=np.array([2, 1, 4])),
        np.array([-5.25572, 9.58533, -6.78778, 1.37335, 3.41987, 6.62502]))
def test_differentiate_vector_function_multiple_inputs():
    def f(x, y):
        f1 = sin(3 * (x**2)) + x * tan(sqrt(y * 7))
        f2 = y**(3 * x) - sin(x)
        return f1, f2

    dfdx = differentiate(f)
    result1 = np.array([[-1.46792323e+01, 5.49340116e+00],
                        [8.51804620e+03, 2.45760000e+04]])
    assert np.allclose(dfdx(x=4, y=2), result1)
def test_differentiate_vector_function():
    def f(x):
        f1 = cos(2**sin(x)) + 3 * x * sin(sqrt(x))
        f2 = x**3 - sin(x)
        return f1, f2

    dfdx = differentiate(f)
    assert np.allclose(dfdx(x=6), np.array([[-1.31673351], [107.03982971]]))
    assert np.allclose(dfdx(x=np.array([1, 3])),
                       np.array([[2.6801, 3.2193], [2.4597, 27.990]]))
def test_differentiate_scalar_function():
    def f(x):
        return sin(3 * (x**2)) + tan(sqrt(x * 7))

    dfdx = differentiate(f)

    # test kwargs
    assert np.allclose(dfdx(x=5), 28.3316)
    assert np.allclose(dfdx(x=np.array([2, 1, 3])),
                       np.array([11.4996, -4.2300, 40.3201]))
    # test args
    assert np.allclose(dfdx(5), 28.3316)
def _newton_raphson(function, values, threshold, max_iter):
    """Returns a root found starting from values using the Newton-Raphson method

    INPUTS
    =======
    function: A function defined using the autodiffcc.ADmath methods
    values: Starting point for root-finding method as a scalar or vector
    threshold: Minimum threshold to declare convergence on a root
    max_iter: Maximum number of iterations taken for the algorithm to converge

    RETURNS
    ========
    A root of function found starting from values, or a raised Exception if none is found

    EXAMPLES
    =========
    >>> _newton_raphson(lambda x, y: (2 * x + y, x - 1), values=np.array([1, 2]), threshold=1e-8, max_iter=2000)
    array([ 1., -2.])
    """
    jacobian = differentiate(function)
    output_shape = len(np.array(function(*values)).flatten())
    for i in range(max_iter):
        flat_variables = values.flatten()
        jacobian_values = jacobian(*values)
        if output_shape == 1:
            if jacobian_values == 0:
                raise Exception(
                    "Newton-Raphson did not converge, try increasing max_iter or changing start_values.")
            else:
                flat_variables = flat_variables - function(*values) / jacobian_values
        else:
            flat_variables = flat_variables - np.matmul(
                np.linalg.pinv(jacobian_values), function(*values))
        values = flat_variables.reshape(values.shape)
        if _norm(function(*values)) < threshold:
            return values
    raise Exception(
        "Newton-Raphson did not converge, try increasing max_iter or changing start_values.")
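# A minimal standalone sketch (not part of autodiffcc) of the update that
# _newton_raphson performs each iteration: x_new = x - pinv(J(x)) @ f(x).
# It uses a hand-coded Jacobian in place of differentiate(); the example system,
# Jacobian, and names below are illustrative assumptions for this sketch only.
def _newton_raphson_sketch():
    import numpy as np  # local import keeps the sketch self-contained

    # Example system f(x, y) = (2x + y, x - 1), packed as a vector; root at (1, -2).
    f = lambda v: np.array([2 * v[0] + v[1], v[0] - 1])
    jac = lambda v: np.array([[2.0, 1.0], [1.0, 0.0]])  # constant Jacobian of this linear system

    x = np.array([1.0, 2.0])
    for _ in range(50):
        x = x - np.matmul(np.linalg.pinv(jac(x)), f(x))  # the Newton-Raphson step
        if np.linalg.norm(f(x)) < 1e-8:
            break
    return x  # approximately array([ 1., -2.])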
def _newton_fourier(function, interval_start: np.ndarray, interval_end: np.ndarray, threshold, max_iter):
    """Returns a root of the function found using the Newton-Fourier algorithm

    INPUTS
    =======
    function: A function defined using the autodiffcc.ADmath methods
    interval_start: The start of the initial interval of values on which to attempt to find a root, as an array
    interval_end: The end of the initial interval of values on which to attempt to find a root, as an array
    threshold: Minimum threshold to declare convergence on a root
    max_iter: Maximum number of iterations taken for the algorithm to converge

    RETURNS
    ========
    A root of the function found (approximately, within the threshold) starting from the interval, or a raised Exception if none is found

    EXAMPLES
    =========
    >>> interval_start = np.asarray([3, 3])
    >>> interval_end = np.asarray([-3, -3])
    >>> my_root = _newton_fourier(lambda x, y: (2 * x + y - 2, y + 2), interval_start=interval_start, interval_end=interval_end, threshold=1e-8, max_iter=2000)
    >>> print(my_root)
    [ 2. -2.]
    """
    # using positional variables
    x_vars = interval_start
    z_vars = interval_end
    output_shape = len(np.array(function(*x_vars)).flatten())

    # Starting values for x_0, z_0
    flat_x = x_vars.flatten()
    flat_z = z_vars.flatten()

    # numerator of the limit for termination of Newton-Fourier
    limit_numerator = (x_vars.flatten() - z_vars.flatten())**2

    jacobian = differentiate(function)
    for i in range(max_iter):
        if output_shape == 1:
            common_jacobian = jacobian(*x_vars)
            if common_jacobian == 0:
                raise Exception(
                    "Newton-Fourier did not converge, try another interval or increasing max_iter.")
            flat_x = flat_x - function(*x_vars) / common_jacobian
            flat_z = flat_z - function(*z_vars) / common_jacobian
        else:
            common_jacobian = np.linalg.pinv(jacobian(*x_vars))
            flat_x = flat_x - np.matmul(common_jacobian, function(*x_vars))
            flat_z = flat_z - np.matmul(common_jacobian, function(*z_vars))

        limit_denominator = limit_numerator
        limit_numerator = flat_x - flat_z
        # guard against a zero denominator in the convergence ratio
        if (limit_denominator == 0).any():
            raise Exception(
                "Newton-Fourier did not converge, try another interval or increasing max_iter.")
        limit = limit_numerator / limit_denominator

        x_vars = flat_x.reshape(x_vars.shape)
        z_vars = flat_z.reshape(z_vars.shape)

        # converged when every component of the ratio is below the threshold
        if (np.abs(limit) < threshold).all():
            return np.mean(np.vstack([x_vars, z_vars]), axis=0)
        if _norm(function(*x_vars)) < threshold and _norm(function(*z_vars)) < threshold:
            raise Exception(
                f"Newton-Fourier did not converge, but two roots were found: {x_vars} and {z_vars}. Try narrowing the interval.")
    raise Exception(
        "Newton-Fourier did not converge, try another interval or increasing max_iter.")
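# A 1-D sketch (not part of autodiffcc) of the Newton-Fourier idea used above:
# run two Newton sequences from the ends of a bracketing interval, evaluating the
# derivative only at the upper sequence, and stop once the gap between the two
# sequences is small, since that gap bounds the error. The stopping rule here is
# simpler than the ratio test in _newton_fourier; the test function and names are
# illustrative assumptions for this sketch only.
def _newton_fourier_1d_sketch():
    # f(x) = x**2 - 2 is convex and increasing on [1, 2] with a sign change, so the
    # method applies; the root is sqrt(2).
    f = lambda x: x ** 2 - 2
    dfdx = lambda x: 2 * x

    x, z = 2.0, 1.0  # x starts where f > 0 (upper sequence), z where f < 0 (lower sequence)
    for _ in range(100):
        slope = dfdx(x)           # both sequences share the derivative at x_k
        x = x - f(x) / slope      # Newton sequence, decreases toward the root
        z = z - f(z) / slope      # Fourier sequence, increases toward the root
        if abs(x - z) < 1e-10:    # the bracketing gap bounds the error
            return 0.5 * (x + z)  # approximately 1.41421356
    raise Exception("sketch did not converge")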
def test_differentiate_vector_function_0_der():
    def f(x, y):
        return 2, 3

    assert np.allclose(differentiate(f)(3, 1), 0)
def test_differentiate_scalar_function_0_der():
    def f(x):
        return 2

    assert np.isclose(differentiate(f)(3), 0)