def test_imsave(self):
    picdir = os.path.join(datapath, "data")
    for png in glob.iglob(picdir + "/*.png"):
        with suppress_warnings() as sup:
            # PIL causes a Py3k ResourceWarning
            sup.filter(message="unclosed file")
            img = misc.imread(png)
        tmpdir = tempfile.mkdtemp()
        try:
            fn1 = os.path.join(tmpdir, 'test.png')
            fn2 = os.path.join(tmpdir, 'testimg')
            with suppress_warnings() as sup:
                # PIL causes a Py3k ResourceWarning
                sup.filter(message="unclosed file")
                misc.imsave(fn1, img)
                misc.imsave(fn2, img, 'PNG')

            with suppress_warnings() as sup:
                # PIL causes a Py3k ResourceWarning
                sup.filter(message="unclosed file")
                data1 = misc.imread(fn1)
                data2 = misc.imread(fn2)
            assert_allclose(data1, img)
            assert_allclose(data2, img)
            assert_equal(data1.shape, img.shape)
            assert_equal(data2.shape, img.shape)
        finally:
            shutil.rmtree(tmpdir)
def test_zero_der_nz_dp():
    """Test secant method with a non-zero dp, but an infinite Newton step"""
    # pick a symmetrical function and choose a point on the side such that,
    # with dx, the secant is a flat line with zero slope, e.g. f = (x - 100)**2,
    # which has a root at x = 100 and is symmetrical around the line x = 100.
    # we have to pick a really big number so that it is consistently true.
    # now find a point on each side so that the secant has a zero slope
    dx = np.finfo(float).eps ** 0.33
    # 100 - p0 = p1 - 100 = p0 * (1 + dx) + dx - 100
    # -> 200 = p0 * (2 + dx) + dx
    p0 = (200.0 - dx) / (2.0 + dx)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "RMS of")
        x = zeros.newton(lambda y: (y - 100.0)**2, x0=[p0] * 10)
    assert_allclose(x, [100] * 10)
    # test scalar cases too
    p0 = (2.0 - 1e-4) / (2.0 + 1e-4)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "Tolerance of")
        x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0)
    assert_allclose(x, 1)
    p0 = (-2.0 + 1e-4) / (2.0 + 1e-4)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "Tolerance of")
        x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0)
    assert_allclose(x, -1)
def test_callback(self):

    def store_residual(r, rvec):
        rvec[rvec.nonzero()[0].max()+1] = r

    # Define A, b
    A = csr_matrix(array([[-2, 1, 0, 0, 0, 0],
                          [1, -2, 1, 0, 0, 0],
                          [0, 1, -2, 1, 0, 0],
                          [0, 0, 1, -2, 1, 0],
                          [0, 0, 0, 1, -2, 1],
                          [0, 0, 0, 0, 1, -2]]))
    b = ones((A.shape[0],))
    maxiter = 1
    rvec = zeros(maxiter+1)
    rvec[0] = 1.0
    callback = lambda r: store_residual(r, rvec)
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x, flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16,
                        maxiter=maxiter, callback=callback)

    # Expected output from SciPy 1.0.0
    assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10)

    # Test preconditioned callback
    M = 1e-3 * np.eye(A.shape[0])
    rvec = zeros(maxiter+1)
    rvec[0] = 1.0
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x, flag = gmres(A, b, M=M, tol=1e-16, maxiter=maxiter,
                        callback=callback)

    # Expected output from SciPy 1.0.0 (callback has preconditioned residual!)
    assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]), rtol=1e-10)
def test_moments(distname, arg, normalization_ok, higher_ok):
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        distfn = distname
        distname = 'rv_histogram_instance'

    with suppress_warnings() as sup:
        sup.filter(IntegrationWarning,
                   "The integral is probably divergent, or slowly convergent.")
        m, v, s, k = distfn.stats(*arg, moments='mvsk')

    if normalization_ok:
        check_normalization(distfn, arg, distname)

    if higher_ok:
        check_mean_expect(distfn, arg, m, distname)
        with suppress_warnings() as sup:
            sup.filter(IntegrationWarning,
                       "The integral is probably divergent, or slowly convergent.")
            check_skew_expect(distfn, arg, m, v, s, distname)
        check_var_expect(distfn, arg, m, v, distname)
        check_kurt_expect(distfn, arg, m, v, k, distname)

    check_loc_scale(distfn, arg, m, v, distname)
    check_moment(distfn, arg, m, v, distname)
def test_errprint():
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, "`errprint` is deprecated!")
        flag = sc.errprint(True)

    try:
        assert_(isinstance(flag, bool))
        with pytest.warns(sc.SpecialFunctionWarning):
            sc.loggamma(0)
    finally:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "`errprint` is deprecated!")
            sc.errprint(flag)
def test_bytescale_cscale_lowhigh(self):
    a = np.arange(10)
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning)
        actual = misc.bytescale(a, cmin=3, cmax=6, low=100, high=200)
    expected = [100, 100, 100, 100, 133, 167, 200, 200, 200, 200]
    assert_equal(actual, expected)
def test_bytescale(self):
    x = np.array([0, 1, 2], np.uint8)
    y = np.array([0, 1, 2])
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning)
        assert_equal(misc.bytescale(x), x)
        assert_equal(misc.bytescale(y), [0, 128, 255])
def test_imread_indexed_png():
    # The file `foo3x5x4indexed.png` was created with this array
    # (3x5 is (height)x(width)):
    data = np.array([[[127, 0, 255, 255],
                      [127, 0, 255, 255],
                      [127, 0, 255, 255],
                      [127, 0, 255, 255],
                      [127, 0, 255, 255]],
                     [[192, 192, 255, 0],
                      [192, 192, 255, 0],
                      [0, 0, 255, 0],
                      [0, 0, 255, 0],
                      [0, 0, 255, 0]],
                     [[0, 31, 255, 255],
                      [0, 31, 255, 255],
                      [0, 31, 255, 255],
                      [0, 31, 255, 255],
                      [0, 31, 255, 255]]], dtype=np.uint8)

    filename = os.path.join(datapath, 'data', 'foo3x5x4indexed.png')
    with open(filename, 'rb') as f:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning)
            im = misc.imread(f)

    assert_array_equal(im, data)
def test_bytescale_low_equals_high(self):
    a = np.arange(3)
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning)
        actual = misc.bytescale(a, low=10, high=10)
    expected = [10, 10, 10]
    assert_equal(actual, expected)
def test_bytescale_rounding(self):
    a = np.array([-0.5, 0.5, 1.5, 2.5, 3.5])
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning)
        actual = misc.bytescale(a, cmin=0, cmax=10, low=0, high=10)
    expected = [0, 1, 2, 3, 4]
    assert_equal(actual, expected)
def test_spherical_jn_inf_complex(self):
    # https://dlmf.nist.gov/10.52.E3
    n = 7
    x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "invalid value encountered in multiply")
        assert_allclose(spherical_jn(n, x), np.array([0, 0, inf*(1+1j)]))
def test_breakdown_underdetermined(self):
    # Should find LSQ solution in the Krylov span in one inner
    # iteration, despite solver breakdown from nilpotent A.
    A = np.array([[0, 1, 1, 1],
                  [0, 0, 1, 1],
                  [0, 0, 0, 1],
                  [0, 0, 0, 0]], dtype=float)

    bs = [
        np.array([1, 1, 1, 1]),
        np.array([1, 1, 1, 0]),
        np.array([1, 1, 0, 0]),
        np.array([1, 0, 0, 0]),
    ]

    for b in bs:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = lgmres(A, b, maxiter=1)
        resp = np.linalg.norm(A.dot(xp) - b)

        K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))]
        y, _, _, _ = np.linalg.lstsq(A.dot(K), b, rcond=-1)
        x = K.dot(y)
        res = np.linalg.norm(A.dot(x) - b)

        assert_allclose(resp, res, err_msg=repr(b))
def test_multiple_constraint_objects(self):
    fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
    x0 = [2, 0, 1]
    coni = []  # only inequality constraints (can use cobyla)
    methods = ["slsqp", "cobyla", "trust-constr"]

    # mixed old and new
    coni.append([{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
                 NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])

    coni.append([LinearConstraint([1, -2, 0], -2, np.inf),
                 NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])

    coni.append([NonlinearConstraint(lambda x: x[0] - 2 * x[1] + 2, 0, np.inf),
                 NonlinearConstraint(lambda x: x[0] - x[1], -1, 1)])

    for con in coni:
        funs = {}
        for method in methods:
            with suppress_warnings() as sup:
                sup.filter(UserWarning)
                result = minimize(fun, x0, method=method, constraints=con)
                funs[method] = result.fun
        assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-4)
        assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-4)
def test_splev(self):
    xnew, b, b2 = self.xnew, self.b, self.b2

    # check that splev works with 1D array of coefficients
    # for array and scalar `x`
    assert_allclose(splev(xnew, b),
                    b(xnew), atol=1e-15, rtol=1e-15)
    assert_allclose(splev(xnew, b.tck),
                    b(xnew), atol=1e-15, rtol=1e-15)
    assert_allclose([splev(x, b) for x in xnew],
                    b(xnew), atol=1e-15, rtol=1e-15)

    # With n-D coefficients, there's a quirk:
    # splev(x, BSpline) is equivalent to BSpline(x)
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning,
                   "Calling splev.. with BSpline objects with c.ndim > 1 is not recommended.")
        assert_allclose(splev(xnew, b2), b2(xnew), atol=1e-15, rtol=1e-15)

    # However, splev(x, BSpline.tck) needs some transposes. This is because
    # BSpline interpolates along the first axis, while the legacy FITPACK
    # wrapper does list(map(...)) which effectively interpolates along the
    # last axis. Like so:
    sh = tuple(range(1, b2.c.ndim)) + (0,)   # sh = (1, 2, 0)
    cc = b2.c.transpose(sh)
    tck = (b2.t, cc, b2.k)
    assert_allclose(splev(xnew, tck),
                    b2(xnew).transpose(sh), atol=1e-15, rtol=1e-15)
def test_triangularity_perturbation(self):
    # Experiment (1) of
    # Awad H. Al-Mohy and Nicholas J. Higham (2012)
    # Improved Inverse Scaling and Squaring Algorithms
    # for the Matrix Logarithm.
    A = np.array([
        [3.2346e-1, 3e4, 3e4, 3e4],
        [0, 3.0089e-1, 3e4, 3e4],
        [0, 0, 3.221e-1, 3e4],
        [0, 0, 0, 3.0744e-1]], dtype=float)
    A_logm = np.array([
        [-1.12867982029050462e+00, 9.61418377142025565e+04,
         -4.52485573953179264e+09, 2.92496941103871812e+14],
        [0.00000000000000000e+00, -1.20101052953082288e+00,
         9.63469687211303099e+04, -4.68104828911105442e+09],
        [0.00000000000000000e+00, 0.00000000000000000e+00,
         -1.13289322264498393e+00, 9.53249183094775653e+04],
        [0.00000000000000000e+00, 0.00000000000000000e+00,
         0.00000000000000000e+00, -1.17947533272554850e+00]], dtype=float)
    assert_allclose(expm(A_logm), A, rtol=1e-4)

    # Perturb the upper triangular matrix by tiny amounts,
    # so that it becomes technically not upper triangular.
    random.seed(1234)
    tiny = 1e-17
    A_logm_perturbed = A_logm.copy()
    A_logm_perturbed[1, 0] = tiny
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "Ill-conditioned.*")
        A_expm_logm_perturbed = expm(A_logm_perturbed)
    rtol = 1e-4
    atol = 100 * tiny
    assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol))
def test_cornercase(self):
    np.random.seed(1234)

    # Rounding error may prevent convergence with tol=0 --- ensure
    # that the return values in this case are correct, and no
    # exceptions are raised

    for n in [3, 5, 10, 100]:
        A = 2*eye(n)

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            b = np.ones(n)
            x, info = gcrotmk(A, b, maxiter=10)
            assert_equal(info, 0)
            assert_allclose(A.dot(x) - b, 0, atol=1e-14)

            x, info = gcrotmk(A, b, tol=0, maxiter=10)
            if info == 0:
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)

            b = np.random.rand(n)
            x, info = gcrotmk(A, b, maxiter=10)
            assert_equal(info, 0)
            assert_allclose(A.dot(x) - b, 0, atol=1e-14)

            x, info = gcrotmk(A, b, tol=0, maxiter=10)
            if info == 0:
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)
def test_integral(self):
    x = [1, 1, 1, 2, 2, 2, 4, 4, 4]
    y = [1, 2, 3, 1, 2, 3, 1, 2, 3]
    z = array([0, 7, 8, 3, 4, 7, 1, 3, 4])

    with suppress_warnings() as sup:
        # This seems to fail (ier=1, see ticket 1642).
        sup.filter(UserWarning, "\nThe required storage space")
        lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)

    tx = [1, 2, 4]
    ty = [1, 2, 3]
    tz = lut(tx, ty)
    trpz = .25*(diff(tx)[:, None]*diff(ty)[None, :]
                * (tz[:-1, :-1]+tz[1:, :-1]+tz[:-1, 1:]+tz[1:, 1:])).sum()
    assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)

    lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0)
    assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz,
                        decimal=0)  # the quadratures give 23.75 and 23.85

    tz = lut(tx[:-1], ty[:-1])
    trpz = .25*(diff(tx[:-1])[:, None]*diff(ty[:-1])[None, :]
                * (tz[:-1, :-1]+tz[1:, :-1]+tz[:-1, 1:]+tz[1:, 1:])).sum()
    assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz)
def test_singular(self):
    A = csc_matrix((5, 5), dtype='d')
    b = array([1, 2, 3, 4, 5], dtype='d')
    with suppress_warnings() as sup:
        sup.filter(MatrixRankWarning, "Matrix is exactly singular")
        x = spsolve(A, b)
    assert_(not np.isfinite(x).any())
def test_reentrancy():
    non_reentrant = [cg, cgs, bicg, bicgstab, gmres, qmr]
    reentrant = [lgmres, minres, gcrotmk]
    for solver in reentrant + non_reentrant:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            _check_reentrancy(solver, solver in reentrant)
def test_zero_rhs(solver):
    np.random.seed(1234)
    A = np.random.rand(10, 10)
    A = A.dot(A.T) + 10 * np.eye(10)

    b = np.zeros(10)
    tols = np.r_[np.logspace(np.log10(1e-10), np.log10(1e2), 7)]

    for tol in tols:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")

            x, info = solver(A, b, tol=tol)
            assert_equal(info, 0)
            assert_allclose(x, 0, atol=1e-15)

            x, info = solver(A, b, tol=tol, x0=ones(10))
            assert_equal(info, 0)
            assert_allclose(x, 0, atol=tol)

            if solver is not minres:
                x, info = solver(A, b, tol=tol, atol=0, x0=ones(10))
                if info == 0:
                    assert_allclose(x, 0)

                x, info = solver(A, b, tol=tol, atol=tol)
                assert_equal(info, 0)
                assert_allclose(x, 0, atol=1e-300)

                x, info = solver(A, b, tol=tol, atol=0)
                assert_equal(info, 0)
                assert_allclose(x, 0, atol=1e-300)
def test_atol_legacy(self):
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")

        # Check the strange legacy behavior: the tolerance is interpreted
        # as atol, but only for the initial residual
        A = eye(2)
        b = 1e-6 * ones(2)
        x, info = gmres(A, b, tol=1e-5)
        assert_array_equal(x, np.zeros(2))

        A = eye(2)
        b = ones(2)
        x, info = gmres(A, b, tol=1e-5)
        assert_(np.linalg.norm(A.dot(x) - b) <= 1e-5*np.linalg.norm(b))
        assert_allclose(x, b, atol=0, rtol=1e-8)

        rndm = np.random.RandomState(12345)
        A = rndm.rand(30, 30)
        b = 1e-6 * ones(30)
        x, info = gmres(A, b, tol=1e-7, restart=20)
        assert_(np.linalg.norm(A.dot(x) - b) > 1e-7)

        A = eye(2)
        b = 1e-10 * ones(2)
        x, info = gmres(A, b, tol=1e-8, atol=0)
        assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b))
def test_bug_6690(self):
    # https://github.com/scipy/scipy/issues/6690
    A_eq = np.array([[0., 0., 0., 0.93, 0., 0.65, 0., 0., 0.83, 0.]])
    b_eq = np.array([0.9626])
    A_ub = np.array([[0., 0., 0., 1.18, 0., 0., 0., -0.2, 0., -0.22],
                     [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                     [0., 0., 0., 0.43, 0., 0., 0., 0., 0., 0.],
                     [0., -1.22, -0.25, 0., 0., 0., -2.06, 0., 0., 1.37],
                     [0., 0., 0., 0., 0., 0., 0., -0.25, 0., 0.]])
    b_ub = np.array([0.615, 0., 0.172, -0.869, -0.022])
    bounds = np.array(
        [[-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73],
         [0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15]]).T
    c = np.array([-1.64, 0.7, 1.8, -1.06, -1.16,
                  0.26, 2.13, 1.53, 0.66, 0.28])
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
        sup.filter(OptimizeWarning, "Solving system with option...")
        sol = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
                      bounds=bounds, method=self.method,
                      options=self.options)
    _assert_success(sol, desired_fun=-1.191)
def test_network_flow_limited_capacity(self):
    # A network flow problem with supply and demand at nodes
    # and with costs and capacities along directed edges.
    # http://blog.sommer-forst.de/2013/04/10/
    cost = [2, 2, 1, 3, 1]
    bounds = [
        [0, 4],
        [0, 2],
        [0, 2],
        [0, 3],
        [0, 5]]
    n, p = -1, 1
    A_eq = [
        [n, n, 0, 0, 0],
        [p, 0, n, n, 0],
        [0, p, p, 0, n],
        [0, 0, 0, p, p]]
    b_eq = [-4, 0, 0, 4]

    if self.method == "simplex":
        # Including the callback here ensures the solution can be
        # calculated correctly, even when phase 1 terminated
        # with some of the artificial variables as pivots
        # (i.e. basis[:m] contains elements corresponding to
        # the artificial variables)
        res = linprog(c=cost, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
                      method=self.method, options=self.options,
                      callback=lambda x, **kwargs: None)
    else:
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
            sup.filter(OptimizeWarning, "A_eq does not appear...")
            res = linprog(c=cost, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
                          method=self.method, options=self.options)
    _assert_success(res, desired_fun=14)
def test_integration():
    rtol = 1e-3
    atol = 1e-6
    y0 = [1/3, 2/9]

    for vectorized, method, t_span, jac in product(
            [False, True],
            ['RK23', 'RK45', 'Radau', 'BDF', 'LSODA'],
            [[5, 9], [5, 1]],
            [None, jac_rational, jac_rational_sparse]):

        if vectorized:
            fun = fun_rational_vectorized
        else:
            fun = fun_rational

        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       "The following arguments have no effect for a chosen solver: `jac`")
            res = solve_ivp(fun, t_span, y0, rtol=rtol, atol=atol,
                            method=method, dense_output=True, jac=jac,
                            vectorized=vectorized)
        assert_equal(res.t[0], t_span[0])
        assert_(res.t_events is None)
        assert_(res.success)
        assert_equal(res.status, 0)

        assert_(res.nfev < 40)

        if method in ['RK23', 'RK45', 'LSODA']:
            assert_equal(res.njev, 0)
            assert_equal(res.nlu, 0)
        else:
            assert_(0 < res.njev < 3)
            assert_(0 < res.nlu < 10)

        y_true = sol_rational(res.t)
        e = compute_error(res.y, y_true, rtol, atol)
        assert_(np.all(e < 5))

        tc = np.linspace(*t_span)
        yc_true = sol_rational(tc)
        yc = res.sol(tc)

        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 5))

        tc = (t_span[0] + t_span[-1]) / 2
        yc_true = sol_rational(tc)
        yc = res.sol(tc)

        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 5))

        # LSODA for some reason doesn't pass the polynomial through the
        # previous points exactly after the order change. It might be a
        # bug in the LSODA implementation, or maybe we're missing something.
        if method != 'LSODA':
            assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
def test_convergence():
    for solver in params.solvers:
        for case in params.cases:
            if solver in case.skip:
                continue
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                check_convergence(solver, case)
def test_precond_dummy():
    case = params.Poisson1D
    for solver in params.solvers:
        if solver in case.skip:
            continue
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            check_precond_dummy(solver, case)
def test_nearest(self):
    N = 5
    x = arange(N)
    y = arange(N)
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, "`nearest` is deprecated")
        assert_allclose(y, nearest(x, y, x+.1))
        assert_allclose(y, nearest(x, y, x-.1))
def test_cheb_even_low_attenuation(self):
    cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027,
                                   0.541338, 0.541338, 0.51027,
                                   0.451924, 1.000000])
    with suppress_warnings() as sup:
        sup.filter(UserWarning, "This window is not suitable")
        cheb_even = windows.chebwin(8, at=-10)
    assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4)
def test_magic_square_bug_7044(self):
    # test linprog with a problem with a rank-deficient A_eq matrix
    A, b, c, N = magic_square(3)
    with suppress_warnings() as sup:
        sup.filter(OptimizeWarning, "A_eq does not appear...")
        res = linprog(c, A_eq=A, b_eq=b, bounds=(0, 1),
                      method=self.method, options=self.options)
    _assert_success(res, desired_fun=1.730550597)
def test_cheb_odd_low_attenuation(self):
    cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405,
                                  0.610151, 0.586405, 0.519052,
                                  1.000000])
    with suppress_warnings() as sup:
        sup.filter(UserWarning, "This window is not suitable")
        cheb_odd = windows.chebwin(7, at=10)
    assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4)
def test_magic_square_sparse_no_presolve(self):
    # test linprog with a problem with a rank-deficient A_eq matrix
    A, b, c, N = magic_square(3)
    with suppress_warnings() as sup:
        sup.filter(MatrixRankWarning, "Matrix is exactly singular")
        sup.filter(OptimizeWarning, "Solving system with option...")
        o = {key: self.options[key] for key in self.options}
        o["presolve"] = False
        res = linprog(c, A_eq=A, b_eq=b, bounds=(0, 1),
                      options=o, method=self.method)
    _assert_success(res, desired_fun=1.730550597)
def test_L3(self):
    # Lampinen ([5]) test problem 3

    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = (x[1]**2 + x[2]**2 + x[1] * x[2] - 14 * x[1] - 16 * x[2] +
               (x[3] - 10)**2 + 4 * (x[4] - 5)**2 + (x[5] - 3)**2 +
               2 * (x[6] - 1)**2 + 5 * x[7]**2 + 7 * (x[8] - 11)**2 +
               2 * (x[9] - 10)**2 + (x[10] - 7)**2 + 45)
        return fun  # maximize

    A = np.zeros((4, 11))
    A[1, [1, 2, 7, 8]] = -4, -5, 3, -9
    A[2, [1, 2, 7, 8]] = -10, 8, 17, -2
    A[3, [1, 2, 9, 10]] = 8, -2, -5, 2
    A = A[1:, 1:]
    b = np.array([-105, 0, -12])

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [
            3 * x[1] - 6 * x[2] - 12 * (x[9] - 8)**2 + 7 * x[10],
            -3 * (x[1] - 2)**2 - 4 * (x[2] - 3)**2 - 2 * x[3]**2 +
            7 * x[4] + 120,
            -x[1]**2 - 2 * (x[2] - 2)**2 + 2 * x[1] * x[2] -
            14 * x[5] + 6 * x[6],
            -5 * x[1]**2 - 8 * x[2] - (x[3] - 6)**2 + 2 * x[4] + 40,
            -0.5 * (x[1] - 8)**2 - 2 * (x[2] - 4)**2 - 3 * x[5]**2 +
            x[6] + 30
        ]

    L = LinearConstraint(A, b, np.inf)
    N = NonlinearConstraint(c1, 0, np.inf)
    bounds = [(-10, 10)] * 10
    constraints = (L, N)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, seed=1234,
                                     constraints=constraints, popsize=3)

    x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548,
             1.430574, 1.321644, 9.828726, 8.280092, 8.375927)
    f_opt = 24.3062091

    assert_allclose(f(x_opt), f_opt, atol=1e-5)
    assert_allclose(res.x, x_opt, atol=1e-6)
    assert_allclose(res.fun, f_opt, atol=1e-5)
    assert res.success
    assert_(np.all(A @ res.x >= b))
    assert_(np.all(np.array(c1(res.x)) >= 0))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
def test_leftright_precond(self):
    """Check that QMR works with left and right preconditioners"""

    from scipy.sparse.linalg.dsolve import splu
    from scipy.sparse.linalg.interface import LinearOperator

    n = 100

    dat = ones(n)
    A = spdiags([-2*dat, 4*dat, -dat], [-1, 0, 1], n, n)
    b = arange(n, dtype='d')

    L = spdiags([-dat/2, dat], [-1, 0], n, n)
    U = spdiags([4*dat, -dat], [0, 1], n, n)
    with suppress_warnings() as sup:
        sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format")
        L_solver = splu(L)
        U_solver = splu(U)

    def L_solve(b):
        return L_solver.solve(b)

    def U_solve(b):
        return U_solver.solve(b)

    def LT_solve(b):
        return L_solver.solve(b, 'T')

    def UT_solve(b):
        return U_solver.solve(b, 'T')

    M1 = LinearOperator((n, n), matvec=L_solve, rmatvec=LT_solve)
    M2 = LinearOperator((n, n), matvec=U_solve, rmatvec=UT_solve)

    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x, info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)

    assert_equal(info, 0)
    assert_normclose(A*x, b, tol=1e-8)
def test_linear2(self):
    N = 3000
    x = arange(N, dtype=float)
    y = ones((100, N)) * arange(N)
    new_x = arange(N) + 0.5
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, "`linear` is deprecated")
        new_y = linear(x, y, new_x)

    assert_allclose(new_y[:5, :5],
                    [[0.5, 1.5, 2.5, 3.5, 4.5],
                     [0.5, 1.5, 2.5, 3.5, 4.5],
                     [0.5, 1.5, 2.5, 3.5, 4.5],
                     [0.5, 1.5, 2.5, 3.5, 4.5],
                     [0.5, 1.5, 2.5, 3.5, 4.5]])
def test_remove_redundancy_infeasibility(self):
    m, n = 10, 10
    c = np.random.rand(n)
    A0 = np.random.rand(m, n)
    b0 = np.random.rand(m)
    A0[-1, :] = 2 * A0[-2, :]
    b0[-1] *= -1
    with suppress_warnings() as sup:
        sup.filter(OptimizeWarning, "A_eq does not appear...")
        res = linprog(c, A_eq=A0, b_eq=b0,
                      method=self.method, options=self.options)
    _assert_infeasible(res)
def test_docformat():
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        udd = doccer.unindent_dict(doc_dict)
        formatted = doccer.docformat(docstring, udd)
        assert_equal(formatted, filled_docstring)
        single_doc = 'Single line doc %(strtest1)s'
        formatted = doccer.docformat(single_doc, doc_dict)
        # Note - initial indent of format string does not
        # affect subsequent indent of inserted parameter
        assert_equal(formatted, """Single line doc Another test
   with some indent""")
def test_truncate(self):
    np.random.seed(1234)
    A = np.random.rand(30, 30) + np.eye(30)
    b = np.random.rand(30)

    for truncate in ['oldest', 'smallest']:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate,
                              tol=1e-4, maxiter=200)
        assert_equal(info, 0)
        assert_allclose(A.dot(x) - b, 0, atol=1e-3)
def test_padecases_dtype_sparse_complex(self):
    # float32 and complex64 lead to errors in spsolve/UMFpack
    dtype = np.complex128
    for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
        a = scale * speye(3, 3, dtype=dtype, format='csc')
        e = exp(scale) * eye(3, dtype=dtype)
        with suppress_warnings() as sup:
            sup.filter(
                SparseEfficiencyWarning,
                "Changing the sparsity structure of a csc_matrix is expensive."
            )
            assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)
def test_ellip_potential():
    def change_coefficient(lambda1, mu, nu, h2, k2):
        x = sqrt(lambda1**2 * mu**2 * nu**2 / (h2 * k2))
        y = sqrt((lambda1**2 - h2) * (mu**2 - h2) * (h2 - nu**2)
                 / (h2 * (k2 - h2)))
        z = sqrt((lambda1**2 - k2) * (k2 - mu**2) * (k2 - nu**2)
                 / (k2 * (k2 - h2)))
        return x, y, z

    def solid_int_ellip(lambda1, mu, nu, n, p, h2, k2):
        return (ellip_harm(h2, k2, n, p, lambda1)
                * ellip_harm(h2, k2, n, p, mu)
                * ellip_harm(h2, k2, n, p, nu))

    def solid_int_ellip2(lambda1, mu, nu, n, p, h2, k2):
        return (ellip_harm_2(h2, k2, n, p, lambda1)
                * ellip_harm(h2, k2, n, p, mu)
                * ellip_harm(h2, k2, n, p, nu))

    def summation(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
        tol = 1e-8
        sum1 = 0
        for n in range(20):
            xsum = 0
            for p in range(1, 2 * n + 2):
                xsum += (4 * pi
                         * (solid_int_ellip(lambda2, mu2, nu2, n, p, h2, k2)
                            * solid_int_ellip2(lambda1, mu1, nu1, n, p, h2, k2))
                         / (ellip_normal(h2, k2, n, p) * (2 * n + 1)))
            if abs(xsum) < 0.1 * tol * abs(sum1):
                break
            sum1 += xsum
        return sum1, xsum

    def potential(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
        x1, y1, z1 = change_coefficient(lambda1, mu1, nu1, h2, k2)
        x2, y2, z2 = change_coefficient(lambda2, mu2, nu2, h2, k2)
        res = sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2)
        return 1 / res

    pts = [
        (120, sqrt(19), 2, 41, sqrt(17), 2, 15, 25),
        (120, sqrt(16), 3.2, 21, sqrt(11), 2.9, 11, 20),
    ]

    with suppress_warnings() as sup:
        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
        sup.filter(IntegrationWarning, "The maximum number of subdivisions")

        for p in pts:
            err_msg = repr(p)
            exact = potential(*p)
            result, last_term = summation(*p)
            assert_allclose(exact, result, atol=0, rtol=1e-8, err_msg=err_msg)
            assert_(abs(result - exact) < 10 * abs(last_term), err_msg)
def test_intermediate_overlow():
    # Make sure we avoid overflow in situations where cosh/sinh would
    # overflow but the product with sin/cos would not
    sinpi_pts = [complex(1 + 1e-14, 227),
                 complex(1e-35, 250),
                 complex(1e-301, 445)]
    # Data generated with mpmath
    sinpi_std = [complex(-8.113438309924894e+295, -np.inf),
                 complex(1.9507801934611995e+306, np.inf),
                 complex(2.205958493464539e+306, np.inf)]
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "invalid value encountered in multiply")
        for p, std in zip(sinpi_pts, sinpi_std):
            assert_allclose(sinpi(p), std)

    # Test for cosine, less interesting because cos(0) = 1.
    p = complex(0.5 + 1e-14, 227)
    std = complex(-8.113438309924894e+295, -np.inf)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "invalid value encountered in multiply")
        assert_allclose(cospi(p), std)
def test_alternate_initial_point(self):
    # Test with a rather large problem (400 variables,
    # 40 constraints) generated by https://gist.github.com/denis-bz/8647461
    # use "improved" initial point
    A, b, c = lpgen_2d(20, 20)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
        sup.filter(OptimizeWarning, "Solving system with option...")
        res = linprog(c, A_ub=A, b_ub=b, method=self.method,
                      options={"ip": True, "disp": True})
        # ip code is independent of sparse/dense
    _assert_success(res, desired_fun=-64.049494229)
def test_delaunay(self):
    # Smoke test
    fig = plt.figure()
    obj = Delaunay(self.points)
    s_before = obj.simplices.copy()
    with suppress_warnings() as sup:
        # filter can be removed when matplotlib 1.x is dropped
        sup.filter(message="The ishold function was deprecated in version")
        r = delaunay_plot_2d(obj, ax=fig.gca())
    assert_array_equal(obj.simplices, s_before)  # shouldn't modify
    assert_(r is fig)
    delaunay_plot_2d(obj, ax=fig.gca())
def test_windowfunc_basics():
    for window_name, params in window_funcs:
        window = getattr(windows, window_name)
        with suppress_warnings() as sup:
            sup.filter(UserWarning, "This window is not suitable")
            if window_name in ('slepian', 'hanning'):
                sup.filter(DeprecationWarning)
            # Check symmetry for odd and even lengths
            w1 = window(8, *params, sym=True)
            w2 = window(7, *params, sym=False)
            assert_array_almost_equal(w1[:-1], w2)

            w1 = window(9, *params, sym=True)
            w2 = window(8, *params, sym=False)
            assert_array_almost_equal(w1[:-1], w2)

            # Check that functions run and output lengths are correct
            assert_equal(len(window(6, *params, sym=True)), 6)
            assert_equal(len(window(6, *params, sym=False)), 6)
            assert_equal(len(window(7, *params, sym=True)), 7)
            assert_equal(len(window(7, *params, sym=False)), 7)

            # Check invalid lengths
            assert_raises(ValueError, window, 5.5, *params)
            assert_raises(ValueError, window, -7, *params)

            # Check degenerate cases
            assert_array_equal(window(0, *params, sym=True), [])
            assert_array_equal(window(0, *params, sym=False), [])
            assert_array_equal(window(1, *params, sym=True), [1])
            assert_array_equal(window(1, *params, sym=False), [1])

            # Check dtype
            assert_(window(0, *params, sym=True).dtype == 'float')
            assert_(window(0, *params, sym=False).dtype == 'float')
            assert_(window(1, *params, sym=True).dtype == 'float')
            assert_(window(1, *params, sym=False).dtype == 'float')
            assert_(window(6, *params, sym=True).dtype == 'float')
            assert_(window(6, *params, sym=False).dtype == 'float')

            # Check normalization
            assert_array_less(window(10, *params, sym=True), 1.01)
            assert_array_less(window(10, *params, sym=False), 1.01)
            assert_array_less(window(9, *params, sym=True), 1.01)
            assert_array_less(window(9, *params, sym=False), 1.01)

            # Check that DFT-even spectrum is purely real for odd and even
            assert_allclose(fftpack.fft(window(10, *params, sym=False)).imag,
                            0, atol=1e-14)
            assert_allclose(fftpack.fft(window(11, *params, sym=False)).imag,
                            0, atol=1e-14)
def test_imread():
    lp = os.path.join(os.path.dirname(__file__), 'dots.png')
    with suppress_warnings() as sup:
        # PIL causes a Py3k ResourceWarning
        sup.filter(message="unclosed file")
        sup.filter(DeprecationWarning)
        img = ndi.imread(lp, mode="RGB")
    assert_array_equal(img.shape, (300, 420, 3))

    with suppress_warnings() as sup:
        # PIL causes a Py3k ResourceWarning
        sup.filter(message="unclosed file")
        sup.filter(DeprecationWarning)
        img = ndi.imread(lp, flatten=True)
    assert_array_equal(img.shape, (300, 420))

    with open(lp, 'rb') as fobj:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning)
            img = ndi.imread(fobj, mode="RGB")
        assert_array_equal(img.shape, (300, 420, 3))
def test_regression_2359(self):
    # Check regression --- for certain point sets, gradient
    # estimation could end up in an infinite loop
    points = np.load(data_file('estimate_gradients_hang.npy'))
    values = np.random.rand(points.shape[0])
    tri = qhull.Delaunay(points)

    # This should not hang
    with suppress_warnings() as sup:
        sup.filter(interpnd.GradientEstimationWarning,
                   "Gradient estimation did not converge")
        interpnd.estimate_gradients_2d_global(tri, values, maxiter=1)
def do_solve(**kw):
    count[0] = 0
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x0, flag = lgmres(A, b, x0=zeros(A.shape[0]),
                          inner_m=6, tol=1e-14, **kw)
    count_0 = count[0]
    assert_(allclose(A * x0, b, rtol=1e-12, atol=1e-12), norm(A * x0 - b))
    return x0, count_0
def test_imread_4bit():
    # pattern4bit.png is a 12(h) x 31(w) grayscale image with bit depth 4.
    # The value in row j and column i is maximum(j, i) % 16.
    # When scaled up to 8 bits, the values become [0, 17, 34, ..., 255].
    filename = os.path.join(datapath, 'data', 'pattern4bit.png')
    with open(filename, 'rb') as f:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning)
            im = misc.imread(f)
    assert_equal(im.dtype, np.uint8)
    j, i = np.meshgrid(np.arange(12), np.arange(31), indexing='ij')
    expected = 17 * (np.maximum(j, i) % 16).astype(np.uint8)
    assert_equal(im, expected)
def test_linear_constant(self):
    x = [1, 1, 1, 2, 2, 2, 3, 3, 3]
    y = [1, 2, 3, 1, 2, 3, 1, 2, 3]
    z = [3, 3, 3, 3, 3, 3, 3, 3, 3]
    s = 0.1
    tx = [1 + s, 3 - s]
    ty = [1 + s, 3 - s]
    with suppress_warnings() as sup:
        r = sup.record(UserWarning, "\nThe coefficients of the spline")
        lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
        assert_equal(len(r), 1)

    assert_almost_equal(lut(2, 2), 3.)
def test_sparse_solve_options(self):
    A, b, c, N = magic_square(3)
    with suppress_warnings() as sup:
        sup.filter(OptimizeWarning, "A_eq does not appear...")
        sup.filter(OptimizeWarning, "Invalid permc_spec option")
        o = {key: self.options[key] for key in self.options}
        permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A',
                       'COLAMD', 'ekki-ekki-ekki')
        for permc_spec in permc_specs:
            o["permc_spec"] = permc_spec
            res = linprog(c, A_eq=A, b_eq=b, bounds=(0, 1),
                          method=self.method, options=o)
            _assert_success(res, desired_fun=1.730550597)
def check_rvs_broadcast(distfunc, distname, allargs, shape, shape_only, otype):
    np.random.seed(123)
    with suppress_warnings() as sup:
        # frechet_l and frechet_r are deprecated, so all their
        # methods generate DeprecationWarnings.
        sup.filter(category=DeprecationWarning, message=".*frechet_")
        sample = distfunc.rvs(*allargs)
        assert_equal(sample.shape, shape, "%s: rvs failed to broadcast" % distname)
        if not shape_only:
            rvs = np.vectorize(lambda *allargs: distfunc.rvs(*allargs),
                               otypes=otype)
            np.random.seed(123)
            expected = rvs(*allargs)
            assert_allclose(sample, expected, rtol=1e-15)
def test_read_4():
    for mmap in [False, True]:
        with suppress_warnings() as sup:
            sup.filter(wavfile.WavFileWarning,
                       "Chunk .non-data. not understood, skipping it")
            rate, data = wavfile.read(datafile('test-48000Hz-2ch-64bit-float-le-wavex.wav'),
                                      mmap=mmap)

        assert_equal(rate, 48000)
        assert_(np.issubdtype(data.dtype, np.float64))
        assert_equal(data.shape, (480, 2))

        del data
def test_itemset_no_segfault_on_readonly():
    # Regression test for ticket #1202.
    # Open the test file in read-only mode.

    filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning,
                   "Cannot close a netcdf_file opened with mmap=True, when "
                   "netcdf_variables or arrays referring to its data still exist")
        with netcdf_file(filename, 'r', mmap=True) as f:
            time_var = f.variables['time']

    # time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
    assert_raises(RuntimeError, time_var.assignValue, 42)
def test_isintlike(self):
    assert_equal(sputils.isintlike(-4), True)
    assert_equal(sputils.isintlike(np.array(3)), True)
    assert_equal(sputils.isintlike(np.array([3])), False)
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning,
                   "Inexact indices into sparse matrices are deprecated")
        assert_equal(sputils.isintlike(3.0), True)

    assert_equal(sputils.isintlike(2.5), False)
    assert_equal(sputils.isintlike(1 + 3j), False)
    assert_equal(sputils.isintlike((1,)), False)
    assert_equal(sputils.isintlike((1, 2)), False)
def test_romb_gh_3731(self):
    # Check that romb makes maximal use of data points
    x = np.arange(2*2*2*2+1)
    y = np.cos(0.2*x)
    val = romb(y)
    val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
    assert_allclose(val, val2, rtol=1e-8, atol=0)

    # should be equal to romb with 2**k+1 samples
    with suppress_warnings() as sup:
        sup.filter(AccuracyWarning, "divmax .4. exceeded")
        val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4)
    assert_allclose(val, val3, rtol=1e-12, atol=0)
def test_list_of_problems(self):
    for prob in self.list_of_problems:
        with suppress_warnings() as sup:
            sup.filter(UserWarning)
            result = minimize(prob.fun, prob.x0,
                              method=self.method,
                              bounds=prob.bounds,
                              constraints=prob.constr)
        assert_array_almost_equal(result.x, prob.x_opt, decimal=3)
def test_L8(self):
    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = (3 * x[1] + 0.000001 * x[1]**3 + 2 * x[2]
               + 0.000002 / 3 * x[2]**3)
        return fun

    A = np.zeros((3, 5))
    A[1, [4, 3]] = 1, -1
    A[2, [3, 4]] = 1, -1
    A = A[1:, 1:]
    b = np.array([-.55, -.55])

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [
            1000 * np.sin(-x[3] - 0.25) + 1000 * np.sin(-x[4] - 0.25)
            + 894.8 - x[1],
            1000 * np.sin(x[3] - 0.25) + 1000 * np.sin(x[3] - x[4] - 0.25)
            + 894.8 - x[2],
            1000 * np.sin(x[4] - 0.25) + 1000 * np.sin(x[4] - x[3] - 0.25)
            + 1294.8
        ]

    L = LinearConstraint(A, b, np.inf)
    N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001))
    bounds = [(0, 1200)] * 2 + [(-.55, .55)] * 2
    constraints = (L, N)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, strategy='rand1bin',
                                     seed=1234, constraints=constraints,
                                     maxiter=5000)

    x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336)
    f_opt = 5126.4981

    assert_allclose(f(x_opt), f_opt, atol=1e-3)
    assert_allclose(res.x[:2], x_opt[:2], atol=2e-3)
    assert_allclose(res.x[2:], x_opt[2:], atol=2e-3)
    assert_allclose(res.fun, f_opt, atol=2e-2)
    assert res.success
    assert_(np.all(A @ res.x >= b))
    assert_(np.all(np.array(c1(res.x)) >= -0.001))
    assert_(np.all(np.array(c1(res.x)) <= 0.001))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
def test_warn_mixed_constraints(self):
    # warns about inefficiency of mixed equality/inequality constraints
    fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
    cons = NonlinearConstraint(lambda x: [x[0]**2 - x[1], x[1] - x[2]],
                               [1.1, .8], [1.1, 1.4])
    bnds = ((0, None), (0, None), (0, None))
    with suppress_warnings() as sup:
        sup.filter(UserWarning, "delta_grad == 0.0")
        _assert_warns(OptimizeWarning, minimize, fun, (2, 0, 1),
                      method=self.method, bounds=bnds, constraints=cons)
def check_fit_args(distfn, arg, rvs):
    with np.errstate(all='ignore'), suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning, message=".*frechet_")
        sup.filter(category=RuntimeWarning,
                   message="The shape parameter of the erlang")
        sup.filter(category=RuntimeWarning,
                   message="floating point number truncated")
        vals = distfn.fit(rvs)
        vals2 = distfn.fit(rvs, optimizer='powell')
    # Only check the length of the return
    # FIXME: should check the actual results to see if we are 'close'
    # to what was created --- but what is 'close' enough
    npt.assert_(len(vals) == 2 + len(arg))
    npt.assert_(len(vals2) == 2 + len(arg))
def test_L4(self):
    # Lampinen ([5]) test problem 4
    def f(x):
        return np.sum(x[:3])

    A = np.zeros((4, 9))
    A[1, [4, 6]] = 0.0025, 0.0025
    A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025
    A[3, [8, 5]] = 0.01, -0.01
    A = A[1:, 1:]
    b = np.array([1, 1, 1])

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [
            x[1] * x[6] - 833.33252 * x[4] - 100 * x[1] + 83333.333,
            x[2] * x[7] - 1250 * x[5] - x[2] * x[4] + 1250 * x[4],
            x[3] * x[8] - 1250000 - x[3] * x[5] + 2500 * x[5]
        ]

    L = LinearConstraint(A, -np.inf, 1)
    N = NonlinearConstraint(c1, 0, np.inf)

    bounds = [(100, 10000)] + [(1000, 10000)] * 2 + [(10, 1000)] * 5
    constraints = (L, N)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, strategy='rand1bin',
                                     seed=1234, constraints=constraints,
                                     popsize=3)

    f_opt = 7049.248
    x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172,
             217.9823, 286.416528, 395.601172]

    assert_allclose(f(x_opt), f_opt, atol=0.001)
    assert_allclose(res.fun, f_opt, atol=0.001)
    assert_allclose(res.x, x_opt, atol=0.002)
    assert res.success
    assert_(np.all(A @ res.x <= b))
    assert_(np.all(np.array(c1(res.x)) >= 0))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))