class TestNumericalInverseHermite: # / (1 +sin(2 Pi x))/2 if |x| <= 1 # f(x) = < # \ 0 otherwise # Taken from UNU.RAN test suite (from file t_hinv.c) class dist0: def pdf(self, x): return 0.5 * (1. + np.sin(2. * np.pi * x)) def dpdf(self, x): return np.pi * np.cos(2. * np.pi * x) def cdf(self, x): return (1. + 2. * np.pi * (1 + x) - np.cos(2. * np.pi * x)) / (4. * np.pi) def support(self): return -1, 1 # / Max(sin(2 Pi x)),0)Pi/2 if -1 < x <0.5 # f(x) = < # \ 0 otherwise # Taken from UNU.RAN test suite (from file t_hinv.c) class dist1: def pdf(self, x): if (x <= -0.5): return np.sin((2. * np.pi) * x) * 0.5 * np.pi if (x < 0.): return 0. if (x <= 0.5): return np.sin((2. * np.pi) * x) * 0.5 * np.pi def dpdf(self, x): if (x <= -0.5): return np.cos((2. * np.pi) * x) * np.pi * np.pi if (x < 0.): return 0. if (x <= 0.5): return np.cos((2. * np.pi) * x) * np.pi * np.pi def cdf(self, x): if (x <= -0.5): return 0.25 * (1 - np.cos((2. * np.pi) * x)) if (x < 0.): return 0.5 if (x <= 0.5): return 0.75 - 0.25 * np.cos((2. * np.pi) * x) def support(self): return -1, 0.5 dists = [dist0(), dist1()] # exact mean and variance of the distributions in the list dists mv0 = [-1 / (2 * np.pi), 1 / 3 - 1 / (4 * np.pi * np.pi)] mv1 = [-1 / 4, 3 / 8 - 1 / (2 * np.pi * np.pi) - 1 / 16] mvs = [mv0, mv1] @pytest.mark.parametrize("dist, mv_ex", zip(dists, mvs)) @pytest.mark.parametrize("order", [3, 5]) def test_basic(self, dist, mv_ex, order): rng = NumericalInverseHermite(dist, order=order, random_state=42) check_cont_samples(rng, dist, mv_ex) # test domains with inf + nan in them. need to write a custom test for # this because not all methods support infinite tails. @pytest.mark.parametrize("domain, err, msg", inf_nan_domains) def test_inf_nan_domains(self, domain, err, msg): with pytest.raises(err, match=msg): NumericalInverseHermite(StandardNormal(), domain=domain) @pytest.mark.xslow @pytest.mark.parametrize(("distname", "shapes"), distcont) def test_basic_all_scipy_dists(self, distname, shapes): slow_dists = {'ksone', 'kstwo', 'levy_stable', 'skewnorm'} fail_dists = { 'beta', 'gausshyper', 'geninvgauss', 'ncf', 'nct', 'norminvgauss', 'genhyperbolic', 'studentized_range', 'vonmises', 'kappa4', 'invgauss', 'wald' } if distname in slow_dists: pytest.skip("Distribution is too slow") if distname in fail_dists: # specific reasons documented in gh-13319 # https://github.com/scipy/scipy/pull/13319#discussion_r626188955 pytest.xfail("Fails - usually due to inaccurate CDF/PDF") np.random.seed(0) dist = getattr(stats, distname)(*shapes) with np.testing.suppress_warnings() as sup: sup.filter(RuntimeWarning, "overflow encountered") sup.filter(RuntimeWarning, "divide by zero") sup.filter(RuntimeWarning, "invalid value encountered") fni = NumericalInverseHermite(dist) x = np.random.rand(10) p_tol = np.max(np.abs(dist.ppf(x) - fni.ppf(x)) / np.abs(dist.ppf(x))) u_tol = np.max(np.abs(dist.cdf(fni.ppf(x)) - x)) assert p_tol < 1e-8 assert u_tol < 1e-12 def test_input_validation(self): match = r"`order` must be either 1, 3, or 5." with pytest.raises(ValueError, match=match): NumericalInverseHermite(StandardNormal(), order=2) match = "`cdf` required but not found" with pytest.raises(ValueError, match=match): NumericalInverseHermite("norm") match = "could not convert string to float" with pytest.raises(ValueError, match=match): NumericalInverseHermite(StandardNormal(), u_resolution='ekki') match = "`max_intervals' must be..." 
with pytest.raises(ValueError, match=match): NumericalInverseHermite(StandardNormal(), max_intervals=-1) match = "`qmc_engine` must be an instance of..." with pytest.raises(ValueError, match=match): fni = NumericalInverseHermite(StandardNormal()) fni.qrvs(qmc_engine=0) if NumpyVersion(np.__version__) >= '1.18.0': # issues with QMCEngines and old NumPy fni = NumericalInverseHermite(StandardNormal()) match = "`d` must be consistent with dimension of `qmc_engine`." with pytest.raises(ValueError, match=match): fni.qrvs(d=3, qmc_engine=stats.qmc.Halton(2)) rngs = [None, 0, np.random.RandomState(0)] if NumpyVersion(np.__version__) >= '1.18.0': rngs.append(np.random.default_rng(0)) # type: ignore sizes = [(None, tuple()), (8, (8, )), ((4, 5, 6), (4, 5, 6))] @pytest.mark.parametrize('rng', rngs) @pytest.mark.parametrize('size_in, size_out', sizes) def test_RVS(self, rng, size_in, size_out): dist = StandardNormal() fni = NumericalInverseHermite(dist) rng2 = deepcopy(rng) rvs = fni.rvs(size=size_in, random_state=rng) if size_in is not None: assert (rvs.shape == size_out) if rng2 is not None: rng2 = check_random_state(rng2) uniform = rng2.uniform(size=size_in) rvs2 = stats.norm.ppf(uniform) assert_allclose(rvs, rvs2) if NumpyVersion(np.__version__) >= '1.18.0': qrngs = [None, stats.qmc.Sobol(1, seed=0), stats.qmc.Halton(3, seed=0)] else: qrngs = [] # `size=None` should not add anything to the shape, `size=1` should sizes = [(None, tuple()), (1, (1, )), (4, (4, )), ((4, ), (4, )), ((2, 4), (2, 4))] # type: ignore # Neither `d=None` nor `d=1` should add anything to the shape ds = [(None, tuple()), (1, tuple()), (3, (3, ))] @pytest.mark.parametrize('qrng', qrngs) @pytest.mark.parametrize('size_in, size_out', sizes) @pytest.mark.parametrize('d_in, d_out', ds) def test_QRVS(self, qrng, size_in, size_out, d_in, d_out): dist = StandardNormal() fni = NumericalInverseHermite(dist) # If d and qrng.d are inconsistent, an error is raised if d_in is not None and qrng is not None and qrng.d != d_in: match = "`d` must be consistent with dimension of `qmc_engine`." with pytest.raises(ValueError, match=match): fni.qrvs(size_in, d=d_in, qmc_engine=qrng) return # Sometimes d is really determined by qrng if d_in is None and qrng is not None and qrng.d != 1: d_out = (qrng.d, ) shape_expected = size_out + d_out qrng2 = deepcopy(qrng) qrvs = fni.qrvs(size=size_in, d=d_in, qmc_engine=qrng) if size_in is not None: assert (qrvs.shape == shape_expected) if qrng2 is not None: uniform = qrng2.random(np.prod(size_in) or 1) qrvs2 = stats.norm.ppf(uniform).reshape(shape_expected) assert_allclose(qrvs, qrvs2, atol=1e-12) def test_QRVS_size_tuple(self): # QMCEngine samples are always of shape (n, d). When `size` is a tuple, # we set `n = prod(size)` in the call to qmc_engine.random, transform # the sample, and reshape it to the final dimensions. When we reshape, # we need to be careful, because the _columns_ of the sample returned # by a QMCEngine are "independent"-ish, but the elements within the # columns are not. We need to make sure that this doesn't get mixed up # by reshaping: qrvs[..., i] should remain "independent"-ish of # qrvs[..., i+1], but the elements within qrvs[..., i] should be # transformed from the same low-discrepancy sequence. 
if NumpyVersion(np.__version__) <= '1.18.0': pytest.skip("QMC doesn't play well with old NumPy") dist = StandardNormal() fni = NumericalInverseHermite(dist) size = (3, 4) d = 5 qrng = stats.qmc.Halton(d, seed=0) qrng2 = stats.qmc.Halton(d, seed=0) uniform = qrng2.random(np.prod(size)) qrvs = fni.qrvs(size=size, d=d, qmc_engine=qrng) qrvs2 = stats.norm.ppf(uniform) for i in range(d): sample = qrvs[..., i] sample2 = qrvs2[:, i].reshape(size) assert_allclose(sample, sample2, atol=1e-12) def test_inaccurate_CDF(self): # CDF function with inaccurate tail cannot be inverted; see gh-13319 # https://github.com/scipy/scipy/pull/13319#discussion_r626188955 shapes = (2.3098496451481823, 0.6268795430096368) match = ("98 : one or more intervals very short; possibly due to " "numerical problems with a pole or very flat tail") # fails with default tol with pytest.warns(RuntimeWarning, match=match): NumericalInverseHermite(stats.beta(*shapes)) # no error with coarser tol NumericalInverseHermite(stats.beta(*shapes), u_resolution=1e-8) def test_custom_distribution(self): dist1 = StandardNormal() fni1 = NumericalInverseHermite(dist1) dist2 = stats.norm() fni2 = NumericalInverseHermite(dist2) assert_allclose(fni1.rvs(random_state=0), fni2.rvs(random_state=0)) u = [ # check the correctness of the PPF for equidistant points between # 0.02 and 0.98. np.linspace(0., 1., num=10000), # test the PPF method for empty arrays [], [[]], # test if nans and infs return nan result. [np.nan], [-np.inf, np.nan, np.inf], # test if a scalar is returned for a scalar input. 0, # test for arrays with nans, values greater than 1 and less than 0, # and some valid values. [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]] ] @pytest.mark.parametrize("u", u) def test_ppf(self, u): dist = StandardNormal() rng = NumericalInverseHermite(dist, u_resolution=1e-12) # Older versions of NumPy throw RuntimeWarnings for comparisons # with nan. with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in greater") sup.filter(RuntimeWarning, "invalid value encountered in " "greater_equal") sup.filter(RuntimeWarning, "invalid value encountered in less") sup.filter(RuntimeWarning, "invalid value encountered in " "less_equal") res = rng.ppf(u) expected = stats.norm.ppf(u) assert_allclose(res, expected, rtol=1e-9, atol=3e-10) assert res.shape == expected.shape def test_u_error(self): dist = StandardNormal() rng = NumericalInverseHermite(dist, u_resolution=1e-10) max_error, mae = rng.u_error() assert max_error < 1e-10 assert mae <= max_error with suppress_warnings() as sup: # ignore warning about u-resolution being too small. sup.filter(RuntimeWarning) rng = NumericalInverseHermite(dist, u_resolution=1e-14) max_error, mae = rng.u_error() assert max_error < 1e-14 assert mae <= max_error def test_deprecations(self): msg = ("`tol` has been deprecated and replaced with `u_resolution`. " "It will be completely removed in a future release.") with pytest.warns(DeprecationWarning, match=msg): NumericalInverseHermite(StandardNormal(), tol=1e-12)
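# A minimal, hedged sketch of the reshape invariant that test_QRVS_size_tuple
# above checks: when `size` is a tuple, n = prod(size) points are drawn from a
# d-dimensional QMC engine and reshaped to size + (d,), and each output slice
# [..., i] must stay tied to a single engine dimension. Assumes scipy.stats.qmc
# is available (SciPy >= 1.7); names below are illustrative only.
import numpy as np
from scipy import stats

size, d = (3, 4), 5
engine = stats.qmc.Halton(d, seed=0)
sample = engine.random(np.prod(size))     # shape (12, 5): one row per point
reshaped = sample.reshape(size + (d,))    # shape (3, 4, 5)
# Column i of the raw sample and slice [..., i] of the reshaped array agree.
assert np.allclose(reshaped[..., 2].ravel(), sample[:, 2])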
def test_version_1_point_10():
    # regression test for gh-2998.
    assert_(NumpyVersion("1.9.0") < "1.10.0")
    assert_(NumpyVersion("1.11.0") < "1.11.1")
    assert_(NumpyVersion("1.11.0") == "1.11.0")
    assert_(NumpyVersion("1.99.11") < "1.99.12")
from numpy.lib import NumpyVersion

version: NumpyVersion

NumpyVersion(b"1.8.0")  # E: incompatible type
version >= b"1.8.0"  # E: Unsupported operand types
def test_version_1_point_10():
    # regression test for gh-2998.
    assert_(NumpyVersion('1.9.0') < '1.10.0')
    assert_(NumpyVersion('1.11.0') < '1.11.1')
    assert_(NumpyVersion('1.11.0') == '1.11.0')
    assert_(NumpyVersion('1.99.11') < '1.99.12')
def test_dev0_a_b_rc_mixed():
    assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
    assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
class TestNumericalInverseHermite: # / (1 +sin(2 Pi x))/2 if |x| <= 1 # f(x) = < # \ 0 otherwise # Taken from UNU.RAN test suite (from file t_hinv.c) class dist0: def pdf(self, x): return 0.5 * (1. + np.sin(2. * np.pi * x)) def dpdf(self, x): return np.pi * np.cos(2. * np.pi * x) def cdf(self, x): return (1. + 2. * np.pi * (1 + x) - np.cos(2. * np.pi * x)) / (4. * np.pi) def support(self): return -1, 1 # / Max(sin(2 Pi x)),0)Pi/2 if -1 < x <0.5 # f(x) = < # \ 0 otherwise # Taken from UNU.RAN test suite (from file t_hinv.c) class dist1: def pdf(self, x): if (x <= -0.5): return np.sin((2. * np.pi) * x) * 0.5 * np.pi if (x < 0.): return 0. if (x <= 0.5): return np.sin((2. * np.pi) * x) * 0.5 * np.pi def dpdf(self, x): if (x <= -0.5): return np.cos((2. * np.pi) * x) * np.pi * np.pi if (x < 0.): return 0. if (x <= 0.5): return np.cos((2. * np.pi) * x) * np.pi * np.pi def cdf(self, x): if (x <= -0.5): return 0.25 * (1 - np.cos((2. * np.pi) * x)) if (x < 0.): return 0.5 if (x <= 0.5): return 0.75 - 0.25 * np.cos((2. * np.pi) * x) def support(self): return -1, 0.5 dists = [dist0(), dist1()] # exact mean and variance of the distributions in the list dists mv0 = [-1 / (2 * np.pi), 1 / 3 - 1 / (4 * np.pi * np.pi)] mv1 = [-1 / 4, 3 / 8 - 1 / (2 * np.pi * np.pi) - 1 / 16] mvs = [mv0, mv1] @pytest.mark.parametrize("dist, mv_ex", zip(dists, mvs)) @pytest.mark.parametrize("order", [3, 5]) def test_basic(self, dist, mv_ex, order): rng = NumericalInverseHermite(dist, order=order, random_state=42) check_cont_samples(rng, dist, mv_ex) # test domains with inf + nan in them. need to write a custom test for # this because not all methods support infinite tails. @pytest.mark.parametrize("domain, err, msg", inf_nan_domains) def test_inf_nan_domains(self, domain, err, msg): with pytest.raises(err, match=msg): NumericalInverseHermite(StandardNormal(), domain=domain) @pytest.mark.xslow @pytest.mark.parametrize(("distname", "shapes"), distcont) def test_basic_all_scipy_dists(self, distname, shapes): slow_dists = {'ksone', 'kstwo', 'levy_stable', 'skewnorm'} fail_dists = { 'beta', 'gausshyper', 'geninvgauss', 'ncf', 'nct', 'norminvgauss', 'genhyperbolic', 'studentized_range', 'vonmises', 'kappa4', 'invgauss', 'wald' } if distname in slow_dists: pytest.skip("Distribution is too slow") if distname in fail_dists: # specific reasons documented in gh-13319 # https://github.com/scipy/scipy/pull/13319#discussion_r626188955 pytest.xfail("Fails - usually due to inaccurate CDF/PDF") np.random.seed(0) dist = getattr(stats, distname)(*shapes) with np.testing.suppress_warnings() as sup: sup.filter(RuntimeWarning, "overflow encountered") sup.filter(RuntimeWarning, "divide by zero") sup.filter(RuntimeWarning, "invalid value encountered") fni = NumericalInverseHermite(dist) x = np.random.rand(10) p_tol = np.max(np.abs(dist.ppf(x) - fni.ppf(x)) / np.abs(dist.ppf(x))) u_tol = np.max(np.abs(dist.cdf(fni.ppf(x)) - x)) assert p_tol < 1e-8 assert u_tol < 1e-12 def test_input_validation(self): match = r"`order` must be either 1, 3, or 5." 
with pytest.raises(ValueError, match=match): NumericalInverseHermite(StandardNormal(), order=2) match = "`cdf` required but not found" with pytest.raises(ValueError, match=match): NumericalInverseHermite("norm") match = "could not convert string to float" with pytest.raises(ValueError, match=match): NumericalInverseHermite(StandardNormal(), u_resolution='ekki') rngs = [None, 0, np.random.RandomState(0)] if NumpyVersion(np.__version__) >= '1.18.0': rngs.append(np.random.default_rng(0)) # type: ignore sizes = [(None, tuple()), (8, (8, )), ((4, 5, 6), (4, 5, 6))] @pytest.mark.parametrize('rng', rngs) @pytest.mark.parametrize('size_in, size_out', sizes) def test_RVS(self, rng, size_in, size_out): dist = StandardNormal() fni = NumericalInverseHermite(dist) rng2 = deepcopy(rng) rvs = fni.rvs(size=size_in, random_state=rng) if size_in is not None: assert (rvs.shape == size_out) if rng2 is not None: rng2 = check_random_state(rng2) uniform = rng2.uniform(size=size_in) rvs2 = stats.norm.ppf(uniform) assert_allclose(rvs, rvs2) def test_inaccurate_CDF(self): # CDF function with inaccurate tail cannot be inverted; see gh-13319 # https://github.com/scipy/scipy/pull/13319#discussion_r626188955 shapes = (2.3098496451481823, 0.6268795430096368) match = ("98 : one or more intervals very short; possibly due to " "numerical problems with a pole or very flat tail") # fails with default tol with pytest.warns(RuntimeWarning, match=match): NumericalInverseHermite(stats.beta(*shapes)) # no error with coarser tol NumericalInverseHermite(stats.beta(*shapes), u_resolution=1e-8) def test_custom_distribution(self): dist1 = StandardNormal() fni1 = NumericalInverseHermite(dist1) dist2 = stats.norm() fni2 = NumericalInverseHermite(dist2) assert_allclose(fni1.rvs(random_state=0), fni2.rvs(random_state=0)) u = [ # check the correctness of the PPF for equidistant points between # 0.02 and 0.98. np.linspace(0., 1., num=10000), # test the PPF method for empty arrays [], [[]], # test if nans and infs return nan result. [np.nan], [-np.inf, np.nan, np.inf], # test if a scalar is returned for a scalar input. 0, # test for arrays with nans, values greater than 1 and less than 0, # and some valid values. [[np.nan, 0.5, 0.1], [0.2, 0.4, np.inf], [-2, 3, 4]] ] @pytest.mark.parametrize("u", u) def test_ppf(self, u): dist = StandardNormal() rng = NumericalInverseHermite(dist, u_resolution=1e-12) # Older versions of NumPy throw RuntimeWarnings for comparisons # with nan. with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in greater") sup.filter(RuntimeWarning, "invalid value encountered in " "greater_equal") sup.filter(RuntimeWarning, "invalid value encountered in less") sup.filter(RuntimeWarning, "invalid value encountered in " "less_equal") res = rng.ppf(u) expected = stats.norm.ppf(u) assert_allclose(res, expected, rtol=1e-9, atol=3e-10) assert res.shape == expected.shape def test_u_error(self): dist = StandardNormal() rng = NumericalInverseHermite(dist, u_resolution=1e-10) max_error, mae = rng.u_error() assert max_error < 1e-10 assert mae <= max_error with suppress_warnings() as sup: # ignore warning about u-resolution being too small. sup.filter(RuntimeWarning) rng = NumericalInverseHermite(dist, u_resolution=1e-14) max_error, mae = rng.u_error() assert max_error < 1e-14 assert mae <= max_error
from the individual packages.  The end-user should not need to instantiate this
class directly.

some more info

"""
from __future__ import division, print_function

import os
import warnings

import numpy as np

from ..datbase import DataInterface, DataListInterface, DataType

try:
    from numpy.lib import NumpyVersion

    numpy114 = NumpyVersion(np.__version__) >= '1.14.0'
except ImportError:
    numpy114 = False


class MfList(DataInterface, DataListInterface):
    """
    a generic object for handling transient boundary condition lists

    Parameters
    ----------
    package : package object
        The package object (of type :class:`hataripy.pakbase.Package`) to
        which this MfList will be added.
    data : varies
        the data of the transient list (optional).
        (the default is None)
""" Shared utility functions used by the fitting routines. """ from numpy import (argsort, array, float_, inexact, issubdtype, __version__ as __np_version__) from numpy.core.multiarray import normalize_axis_index from numpy.lib import NumpyVersion __all__ = ['moveaxis', 'preprocess', 'preprocess_pair', 'preprocess_npair'] if NumpyVersion(__np_version__) >= '1.11.0': from numpy import moveaxis else: from numpy import rollaxis def moveaxis(a, start, end): return rollaxis(a, start, normalize_axis_index(end, a.ndim + 1)) def preprocess(x, copy=False, float=False, axis=None): """ Ensure that `x` is a properly formatted numpy array. Proper formatting means at least one dimension, and may include optional copying, reshaping and coersion into a floating point datatype. Parameters ---------- x : array-like
try:
    from thread import get_ident
except ImportError:
    from dummy_thread import get_ident

import numpy as np
from numpy import (concatenate, errstate, array, format_float_positional,
                   format_float_scientific, datetime_as_string, datetime_data,
                   ndarray, ravel, any, longlong, intc, int_, float_,
                   complex_, bool_, flexible)
from numpy.core import umath
import warnings
import contextlib
import os

from numpy.lib import NumpyVersion
if (NumpyVersion(np.__version__) < '1.15.10' or
        os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 0) == 0):
    raise Exception("numpy __array_function__ must be enabled")


def is_ndducktype(val):
    return hasattr(val, '__array_function__')


def is_duckscalar(val):
    # A simple test of whether a numpy-like type is a scalar and not a 0d array
    # is if indexing with an empty tuple gives back a scalar. Hopefully that is
    # not too fragile.
    return (isinstance(val, np.generic) or
            (is_ndducktype(val) and val.shape == () and
             type(val[()]) is type(val)))
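# Hedged sketch of the distinction the helpers above draw (assumes the
# is_duckscalar definition above is in scope): a NumPy scalar passes, a 0d
# array does not, because indexing a 0d array with an empty tuple returns an
# object of a different type.
assert is_duckscalar(np.float64(1.0))     # scalar: an np.generic instance
assert not is_duckscalar(np.zeros(()))    # 0d array: arr[()] is np.float64, not ndarray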
    'Object',
]

_python_desc = ("homogeneous list or tuple, "
                "integer, float, complex or bytes")


def _is_python(array):
    return isinstance(array, (tuple, list, int, float, complex, bytes))


_numpy_aliases = []
_numpy_desc = "NumPy array, record or scalar"

from numpy.lib import NumpyVersion
if NumpyVersion(numpy.__version__) >= NumpyVersion('1.19.0'):
    def toarray(array, *args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter('error')
            try:
                array = numpy.array(array, *args, **kwargs)
            except numpy.VisibleDeprecationWarning:
                raise ValueError(
                    'cannot guess the desired dtype from the input')
        return array
else:
    toarray = numpy.array
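# Hedged usage sketch for the toarray wrapper above (assumes a NumPy version,
# roughly 1.19 to 1.23, where building an array from ragged nested sequences
# emits numpy.VisibleDeprecationWarning; the wrapper promotes that warning to a
# ValueError so callers see a consistent error; newer NumPy raises directly).
toarray([1, 2, 3])              # fine: a regular homogeneous sequence
try:
    toarray([[1, 2], [3]])      # ragged input
except ValueError as exc:
    print(exc)                  # e.g. "cannot guess the desired dtype from the input"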
import copy

import numpy as np

from .grid import Grid, CachedData

try:
    from numpy.lib import NumpyVersion

    numpy115 = NumpyVersion(np.__version__) >= "1.15.0"
except ImportError:
    numpy115 = False

if not numpy115:

    def flip_numpy115(m, axis=None):
        """Provide same behavior for np.flip since numpy 1.15.0."""
        import numpy.core.numeric as _nx
        from numpy.core.numeric import asarray

        if not hasattr(m, "ndim"):
            m = asarray(m)
        if axis is None:
            indexer = (np.s_[::-1],) * m.ndim
        else:
            axis = _nx.normalize_axis_tuple(axis, m.ndim)
            indexer = [np.s_[:]] * m.ndim
            for ax in axis:
                indexer[ax] = np.s_[::-1]
            indexer = tuple(indexer)
        return m[indexer]

    np.flip = flip_numpy115
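# Illustrative check of the axis-aware behavior the shim above backports for
# NumPy < 1.15 (on newer NumPy, np.flip already behaves this way).
import numpy as np

m = np.arange(8).reshape(2, 4)
print(np.flip(m, axis=1))
# [[3 2 1 0]
#  [7 6 5 4]]
print(np.flip(m))            # axis=None reverses every axis
# [[7 6 5 4]
#  [3 2 1 0]]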
def setup(self, name):
    if NumpyVersion(np.__version__) < '1.11.0.dev0':
        raise NotImplementedError
def test_dev0_a_b_rc_mixed():
    assert_(NumpyVersion("1.9.0a2.dev0+f16acvda") == "1.9.0a2.dev0+11111111")
    assert_(NumpyVersion("1.9.0a2.dev0+6acvda54") < "1.9.0a2")
def test_dev0_version():
    assert_(NumpyVersion("1.9.0.dev0+Unknown") < "1.9.0")
    for ver in ["1.9.0", "1.9.0a1", "1.9.0b2", "1.9.0b2.dev0+ffffffff"]:
        assert_(NumpyVersion("1.9.0.dev0+f16acvda") < ver)
    assert_(NumpyVersion("1.9.0.dev0+f16acvda") == "1.9.0.dev0+11111111")
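# For reference, a small runnable sketch of the ordering NumpyVersion
# implements and that the tests above exercise: dev builds sort before
# alpha/beta/rc pre-releases, which sort before the final release.
from numpy.lib import NumpyVersion

ordered = ['1.9.0.dev0+f16acvda', '1.9.0a1', '1.9.0b2', '1.9.0rc1', '1.9.0']
for lower, higher in zip(ordered, ordered[1:]):
    assert NumpyVersion(lower) < higher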
def _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired, unpacker, nan_policy, axis, data_generator): # Tests the 1D and vectorized behavior of hypothesis tests against a # reference implementation (nan_policy_1d with np.ndenumerate) # Some hypothesis tests return a non-iterable that needs an `unpacker` to # extract the statistic and p-value. For those that don't: if not unpacker: def unpacker(res): return res if NumpyVersion(np.__version__) < '1.18.0': pytest.xfail("Generator `permutation` method doesn't support `axis`") rng = np.random.default_rng(0) # Generate multi-dimensional test data with all important combinations # of patterns of nans along `axis` n_repetitions = 3 # number of repetitions of each pattern data_gen_kwds = { 'n_samples': n_samples, 'n_repetitions': n_repetitions, 'axis': axis, 'rng': rng, 'paired': paired } if data_generator == 'mixed': inherent_size = 6 # number of distinct types of patterns data = _mixed_data_generator(**data_gen_kwds) elif data_generator == 'all_nans': inherent_size = 2 # hard-coded in _homogeneous_data_generator data_gen_kwds['all_nans'] = True data = _homogeneous_data_generator(**data_gen_kwds) elif data_generator == 'all_finite': inherent_size = 2 # hard-coded in _homogeneous_data_generator data_gen_kwds['all_nans'] = False data = _homogeneous_data_generator(**data_gen_kwds) output_shape = [n_repetitions] + [inherent_size] * n_samples # To generate reference behavior to compare against, loop over the axis- # slices in data. Make indexing easier by moving `axis` to the end and # broadcasting all samples to the same shape. data_b = [np.moveaxis(sample, axis, -1) for sample in data] data_b = [ np.broadcast_to(sample, output_shape + [sample.shape[-1]]) for sample in data_b ] statistics = np.zeros(output_shape) pvalues = np.zeros(output_shape) for i, _ in np.ndenumerate(statistics): data1d = [sample[i] for sample in data_b] with np.errstate(divide='ignore', invalid='ignore'): try: res1d = nan_policy_1d(hypotest, data1d, unpacker, *args, n_outputs=n_outputs, nan_policy=nan_policy, paired=paired, _no_deco=True, **kwds) # Eventually we'll check the results of a single, vectorized # call of `hypotest` against the arrays `statistics` and # `pvalues` populated using the reference `nan_policy_1d`. # But while we're at it, check the results of a 1D call to # `hypotest` against the reference `nan_policy_1d`. res1db = unpacker( hypotest(*data1d, *args, nan_policy=nan_policy, **kwds)) assert_equal(res1db[0], res1d[0]) if len(res1db) == 2: assert_equal(res1db[1], res1d[1]) # When there is not enough data in 1D samples, many existing # hypothesis tests raise errors instead of returning nans . # For vectorized calls, we put nans in the corresponding elements # of the output. except (RuntimeWarning, ValueError, ZeroDivisionError) as e: # whatever it is, make sure same error is raised by both # `nan_policy_1d` and `hypotest` with pytest.raises(type(e), match=re.escape(str(e))): nan_policy_1d(hypotest, data1d, unpacker, *args, n_outputs=n_outputs, nan_policy=nan_policy, paired=paired, _no_deco=True, **kwds) with pytest.raises(type(e), match=re.escape(str(e))): hypotest(*data1d, *args, nan_policy=nan_policy, **kwds) if any([ str(e).startswith(message) for message in too_small_messages ]): res1d = np.full(n_outputs, np.nan) else: raise e statistics[i] = res1d[0] if len(res1d) == 2: pvalues[i] = res1d[1] # Perform a vectorized call to the hypothesis test. # If `nan_policy == 'raise'`, check that it raises the appropriate error. 
# If not, compare against the output against `statistics` and `pvalues` if nan_policy == 'raise' and not data_generator == "all_finite": message = 'The input contains nan values' with pytest.raises(ValueError, match=message): hypotest(*data, axis=axis, nan_policy=nan_policy, *args, **kwds) else: with np.errstate(divide='ignore', invalid='ignore'): res = unpacker( hypotest(*data, axis=axis, nan_policy=nan_policy, *args, **kwds)) assert_equal(res[0], statistics) assert_equal(res[0].dtype, statistics.dtype) if len(res) == 2: assert_equal(res[1], pvalues) assert_equal(res[1].dtype, pvalues.dtype)
""" Low-level Kalman filter computation steps with multi-dimensional input arrays. Unlike with the `KalmanFilter <index.html#simdkalman.KalmanFilter>`_ class, all inputs must be numpy arrays. However, their dimensions can flexibly vary form 1 to 3 as long as they are reasonable from the point of view of matrix multiplication and numpy broadcasting rules. Matrix operations are applied on the *last* two axes of the arrays. """ import numpy as np from functools import wraps # work around some numpy glitches associated with different versions from numpy.lib import NumpyVersion _HAVE_MATMUL = NumpyVersion(np.__version__) >= '1.10.0' _EINSUM_OPTS = {} if NumpyVersion(np.__version__) == '1.14.0': # https://github.com/numpy/numpy/issues/10343 _EINSUM_OPTS = {'optimize': False} def ddot(A, B): "Matrix multiplication over last two axes" if _HAVE_MATMUL: return np.matmul(A, B) else: return np.einsum('...ij,...jk->...ik', A, B) def ddot_t_right(A, B): "Matrix multiplication over last 2 axes with right operand transposed" return np.einsum('...ij,...kj->...ik', A, B, **_EINSUM_OPTS)
def test_axis_nan_policy_axis_is_None(hypotest, args, kwds, n_samples, n_outputs, paired, unpacker, nan_policy, data_generator): # check for correct behavior when `axis=None` if not unpacker: def unpacker(res): return res if NumpyVersion(np.__version__) < '1.18.0': pytest.xfail("Generator `permutation` method doesn't support `axis`") rng = np.random.default_rng(0) if data_generator == "empty": data = [rng.random((2, 0)) for i in range(n_samples)] else: data = [rng.random((2, 20)) for i in range(n_samples)] if data_generator == "mixed": masks = [rng.random((2, 20)) > 0.9 for i in range(n_samples)] for sample, mask in zip(data, masks): sample[mask] = np.nan elif data_generator == "all_nans": data = [sample * np.nan for sample in data] data_raveled = [sample.ravel() for sample in data] if nan_policy == 'raise' and data_generator not in {"all_finite", "empty"}: message = 'The input contains nan values' # check for correct behavior whether or not data is 1d to begin with with pytest.raises(ValueError, match=message): hypotest(*data, axis=None, nan_policy=nan_policy, *args, **kwds) with pytest.raises(ValueError, match=message): hypotest(*data_raveled, axis=None, nan_policy=nan_policy, *args, **kwds) else: # behavior of reference implementation with 1d input, hypotest with 1d # input, and hypotest with Nd input should match, whether that means # that outputs are equal or they raise the same exception ea_str, eb_str, ec_str = None, None, None with np.errstate(divide='ignore', invalid='ignore'): try: res1da = nan_policy_1d(hypotest, data_raveled, unpacker, *args, n_outputs=n_outputs, nan_policy=nan_policy, paired=paired, _no_deco=True, **kwds) except (RuntimeWarning, ValueError, ZeroDivisionError) as ea: ea_str = str(ea) try: res1db = unpacker( hypotest(*data_raveled, *args, nan_policy=nan_policy, **kwds)) except (RuntimeWarning, ValueError, ZeroDivisionError) as eb: eb_str = str(eb) try: res1dc = unpacker( hypotest(*data, *args, axis=None, nan_policy=nan_policy, **kwds)) except (RuntimeWarning, ValueError, ZeroDivisionError) as ec: ec_str = str(ec) if ea_str or eb_str or ec_str: assert any([ str(ea_str).startswith(message) for message in too_small_messages ]) assert ea_str == eb_str == ec_str else: assert_equal(res1db, res1da) assert_equal(res1dc, res1da)
def test_terminal_constraints(sys_args): """Test out the ability to handle terminal constraints""" # Create the system sys = ct.ss2io(ct.ss(*sys_args)) # Shortest path to a point is a line Q = np.zeros((2, 2)) R = np.eye(2) cost = opt.quadratic_cost(sys, Q, R) # Set up the terminal constraint to be the origin final_point = [opt.state_range_constraint(sys, [0, 0], [0, 0])] # Create the optimal control problem time = np.arange(0, 3, 1) optctrl = opt.OptimalControlProblem( sys, time, cost, terminal_constraints=final_point) # Find a path to the origin x0 = np.array([4, 3]) res = optctrl.compute_trajectory(x0, squeeze=True, return_x=True) t, u1, x1 = res.time, res.inputs, res.states # Bug prior to SciPy 1.6 will result in incorrect results if NumpyVersion(sp.__version__) < '1.6.0': pytest.xfail("SciPy 1.6 or higher required") np.testing.assert_almost_equal(x1[:,-1], 0, decimal=4) # Make sure it is a straight line Tf = time[-1] if ct.isctime(sys): # Continuous time is not that accurate on the input, so just skip test pass else: # Final point doesn't affect cost => don't need to test np.testing.assert_almost_equal( u1[:, 0:-1], np.kron((-x0/Tf).reshape((2, 1)), np.ones(time.shape))[:, 0:-1]) np.testing.assert_allclose( x1, np.kron(x0.reshape((2, 1)), time[::-1]/Tf), atol=0.1, rtol=0.01) # Re-run using initial guess = optional and make sure nothing changes res = optctrl.compute_trajectory(x0, initial_guess=u1) np.testing.assert_almost_equal(res.inputs, u1) # Re-run using a basis function and see if we get the same answer res = opt.solve_ocp(sys, time, x0, cost, terminal_constraints=final_point, basis=flat.BezierFamily(4, Tf)) np.testing.assert_almost_equal(res.inputs, u1, decimal=2) # Impose some cost on the state, which should change the path Q = np.eye(2) R = np.eye(2) * 0.1 cost = opt.quadratic_cost(sys, Q, R) optctrl = opt.OptimalControlProblem( sys, time, cost, terminal_constraints=final_point) # Turn off warning messages, since we sometimes don't get convergence with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="unable to solve", category=UserWarning) # Find a path to the origin res = optctrl.compute_trajectory( x0, squeeze=True, return_x=True, initial_guess=u1) t, u2, x2 = res.time, res.inputs, res.states # Not all configurations are able to converge (?) if res.success: np.testing.assert_almost_equal(x2[:,-1], 0) # Make sure that it is *not* a straight line path assert np.any(np.abs(x2 - x1) > 0.1) assert np.any(np.abs(u2) > 1) # Make sure next test is useful # Add some bounds on the inputs constraints = [opt.input_range_constraint(sys, [-1, -1], [1, 1])] optctrl = opt.OptimalControlProblem( sys, time, cost, constraints, terminal_constraints=final_point) res = optctrl.compute_trajectory(x0, squeeze=True, return_x=True) t, u3, x3 = res.time, res.inputs, res.states # Check the answers only if we converged if res.success: np.testing.assert_almost_equal(x3[:,-1], 0, decimal=4) # Make sure we got a new path and didn't violate the constraints assert np.any(np.abs(x3 - x1) > 0.1) np.testing.assert_array_less(np.abs(u3), 1 + 1e-6) # Make sure that infeasible problems are handled sensibly x0 = np.array([10, 3]) with pytest.warns(UserWarning, match="unable to solve"): res = optctrl.compute_trajectory(x0, squeeze=True, return_x=True) assert not res.success
    widget.resize(640, 480)
    widget.show()
    widget.initialize(force=True)
    yield widget
    widget.close()


@flaky(max_runs=5)
@pytest.mark.order(1)
@pytest.mark.parametrize(
    "lib",
    [('str', 'class str', [0, 1]),
     ('numpy.testing', 'numpy.testing', [5, 10])]
)
@pytest.mark.skipif(
    (sys.platform == 'darwin' or
     NumpyVersion(np.__version__) < NumpyVersion('1.21.0')),
    reason="Fails on Mac and older versions of Numpy"
)
@pytest.mark.skipif(
    sys.platform.startswith('linux') or os.name == 'nt' and running_in_ci(),
    reason="Stalls CI frequently on Linux and Windows"
)
def test_get_pydoc(pydocbrowser, qtbot, lib):
    """
    Go to the documentation by url.

    Regression test for spyder-ide/spyder#10740
    """
    browser = pydocbrowser
    element, doc, matches = lib

    webview = browser.webview
from numpy.lib import NumpyVersion

version = NumpyVersion("1.8.0")

reveal_type(version.vstring)  # E: str
reveal_type(version.version)  # E: str
reveal_type(version.major)  # E: int
reveal_type(version.minor)  # E: int
reveal_type(version.bugfix)  # E: int
reveal_type(version.pre_release)  # E: str
reveal_type(version.is_devversion)  # E: bool

reveal_type(version == version)  # E: bool
reveal_type(version != version)  # E: bool
reveal_type(version < "1.8.0")  # E: bool
reveal_type(version <= version)  # E: bool
reveal_type(version > version)  # E: bool
reveal_type(version >= "1.8.0")  # E: bool
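# Hedged sketch of the runtime pattern these type tests cover: NumpyVersion
# compares against plain version strings (or other NumpyVersion instances),
# which is how the snippets in this collection gate optional behavior.
import numpy as np
from numpy.lib import NumpyVersion

if NumpyVersion(np.__version__) >= '1.17.0':
    rng = np.random.default_rng(0)        # Generator API available
else:
    rng = np.random.RandomState(0)        # fall back to the legacy interface
print(rng.uniform(size=3))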
def setup(self, name):
    from numpy.lib import NumpyVersion
    if NumpyVersion(np.__version__) < '1.11.0.dev0':
        raise NotImplementedError
def resize(image, output_shape, order=None, mode='reflect', cval=0, clip=True, preserve_range=False, anti_aliasing=None, anti_aliasing_sigma=None): """Resize image to match a certain size. Performs interpolation to up-size or down-size N-dimensional images. Note that anti-aliasing should be enabled when down-sizing images to avoid aliasing artifacts. For down-sampling with an integer factor also see `skimage.transform.downscale_local_mean`. Parameters ---------- image : ndarray Input image. output_shape : tuple or ndarray Size of the generated output image `(rows, cols[, ...][, dim])`. If `dim` is not provided, the number of channels is preserved. In case the number of input channels does not equal the number of output channels a n-dimensional interpolation is applied. Returns ------- resized : ndarray Resized version of the input. Other parameters ---------------- order : int, optional The order of the spline interpolation, default is 0 if image.dtype is bool and 1 otherwise. The order has to be in the range 0-5. See `skimage.transform.warp` for detail. mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of `numpy.pad`. cval : float, optional Used in conjunction with mode 'constant', the value outside the image boundaries. clip : bool, optional Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range. preserve_range : bool, optional Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of `img_as_float`. Also see https://scikit-image.org/docs/dev/user_guide/data_types.html anti_aliasing : bool, optional Whether to apply a Gaussian filter to smooth the image prior to down-scaling. It is crucial to filter when down-sampling the image to avoid aliasing artifacts. If input image data type is bool, no anti-aliasing is applied. anti_aliasing_sigma : {float, tuple of floats}, optional Standard deviation for Gaussian filtering to avoid aliasing artifacts. By default, this value is chosen as (s - 1) / 2 where s is the down-scaling factor, where s > 1. For the up-size case, s < 1, no anti-aliasing is performed prior to rescaling. Notes ----- Modes 'reflect' and 'symmetric' are similar, but differ in whether the edge pixels are duplicated during the reflection. As an example, if an array has values [0, 1, 2] and was padded to the right by four values using symmetric, the result would be [0, 1, 2, 2, 1, 0, 0], while for reflect it would be [0, 1, 2, 1, 0, 1, 2]. Examples -------- >>> from skimage import data >>> from skimage.transform import resize >>> image = data.camera() >>> resize(image, (100, 100)).shape (100, 100) """ output_shape = tuple(output_shape) output_ndim = len(output_shape) input_shape = image.shape if output_ndim > image.ndim: # append dimensions to input_shape input_shape = input_shape + (1, ) * (output_ndim - image.ndim) image = np.reshape(image, input_shape) elif output_ndim == image.ndim - 1: # multichannel case: append shape of last axis output_shape = output_shape + (image.shape[-1], ) elif output_ndim < image.ndim - 1: raise ValueError("len(output_shape) cannot be smaller than the image " "dimensions") if anti_aliasing is None: anti_aliasing = not image.dtype == bool if image.dtype == bool and anti_aliasing: warn("Input image dtype is bool. 
Gaussian convolution is not defined " "with bool data type. Please set anti_aliasing to False or " "explicitely cast input image to another data type. Starting " "from version 0.19 a ValueError will be raised instead of this " "warning.", FutureWarning, stacklevel=2) factors = (np.asarray(input_shape, dtype=float) / np.asarray(output_shape, dtype=float)) # Translate modes used by np.pad to those used by scipy.ndimage ndi_mode = _to_ndimage_mode(mode) if anti_aliasing: if anti_aliasing_sigma is None: anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2) else: anti_aliasing_sigma = \ np.atleast_1d(anti_aliasing_sigma) * np.ones_like(factors) if np.any(anti_aliasing_sigma < 0): raise ValueError("Anti-aliasing standard deviation must be " "greater than or equal to zero") elif np.any((anti_aliasing_sigma > 0) & (factors <= 1)): warn("Anti-aliasing standard deviation greater than zero but " "not down-sampling along all axes") image = ndi.gaussian_filter(image, anti_aliasing_sigma, cval=cval, mode=ndi_mode) if NumpyVersion(scipy.__version__) >= '1.6.0': # The grid_mode kwarg was introduced in SciPy 1.6.0 order = _validate_interpolation_order(image.dtype, order) zoom_factors = [1 / f for f in factors] image = convert_to_float(image, preserve_range) out = ndi.zoom(image, zoom_factors, order=order, mode=ndi_mode, cval=cval, grid_mode=True) _clip_warp_output(image, out, order, mode, cval, clip) # TODO: Remove the fallback code below once SciPy >= 1.6.0 is required. # 2-dimensional interpolation elif len(output_shape) == 2 or (len(output_shape) == 3 and output_shape[2] == input_shape[2]): rows = output_shape[0] cols = output_shape[1] input_rows = input_shape[0] input_cols = input_shape[1] if rows == 1 and cols == 1: tform = AffineTransform(translation=(input_cols / 2.0 - 0.5, input_rows / 2.0 - 0.5)) else: # 3 control points necessary to estimate exact AffineTransform src_corners = np.array([[1, 1], [1, rows], [cols, rows]]) - 1 dst_corners = np.zeros(src_corners.shape, dtype=np.double) # take into account that 0th pixel is at position (0.5, 0.5) dst_corners[:, 0] = factors[1] * (src_corners[:, 0] + 0.5) - 0.5 dst_corners[:, 1] = factors[0] * (src_corners[:, 1] + 0.5) - 0.5 tform = AffineTransform() tform.estimate(src_corners, dst_corners) # Make sure the transform is exactly metric, to ensure fast warping. tform.params[2] = (0, 0, 1) tform.params[0, 1] = 0 tform.params[1, 0] = 0 out = warp(image, tform, output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range) else: # n-dimensional interpolation order = _validate_interpolation_order(image.dtype, order) coord_arrays = [factors[i] * (np.arange(d) + 0.5) - 0.5 for i, d in enumerate(output_shape)] coord_map = np.array(np.meshgrid(*coord_arrays, sparse=False, indexing='ij')) image = convert_to_float(image, preserve_range) out = ndi.map_coordinates(image, coord_map, order=order, mode=ndi_mode, cval=cval) _clip_warp_output(image, out, order, mode, cval, clip) return out
def test_dev0_version():
    assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
        assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)
    assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')
def parse_targets(filename):
    """ Load a text file with target coordinates. Returns an array of
    target locations in Galactic coordinates.

    File description:
    [NAME] [LON] [LAT] [RADIUS] [COORD]

    The values of LON and LAT will depend on COORD:
    COORD = [GAL  | CEL | HPX  ]
    LON   = [GLON | RA  | NSIDE]
    LAT   = [GLAT | DEC | PIX  ]
    """
    base, ext = os.path.splitext(filename)
    if (ext == '.fits'):
        import fitsio
        data = fitsio.read(filename)
    elif (ext == '.txt'):
        from numpy.lib import NumpyVersion
        if NumpyVersion(np.__version__) < '1.14.0':
            data = np.genfromtxt(filename, names=True, dtype=None)
        else:
            data = np.genfromtxt(filename, names=True, dtype=None,
                                 encoding=None)
        #data = np.genfromtxt(filename,unpack=True,usecols=list(range(5)),dtype=object,names=True)
    elif (ext == '.yaml'):
        import yaml
        data = [(k, v['kernel']['lon']['value'], v['kernel']['lat']['value'],
                 0.5, 'CEL')
                for k, v in yaml.load(open(filename)).items()]
        data = np.rec.fromrecords(
            data, names=['name', 'lon', 'lat', 'radius', 'coord'])
    else:
        msg = "Unrecognized file type: %s" % filename
        raise IOError(msg)

    data = np.atleast_1d(data)
    data.dtype.names = list(map(str.lower, data.dtype.names))

    # Deal with one-line input files
    #if data.ndim == 1: data = np.array([data]).T

    names = data['name']
    out = data[['lon', 'lat', 'radius']].copy()

    coord = np.char.lower(data['coord'])
    gal = (coord == 'gal')
    cel = (coord == 'cel')
    hpx = (coord == 'hpx')

    if cel.any():
        glon, glat = cel2gal(data['lon'][cel], data['lat'][cel])
        out['lon'][cel] = glon
        out['lat'][cel] = glat
    if hpx.any():
        glon, glat = pix2ang(data['lat'][hpx], data['lon'][hpx])
        out['lon'][hpx] = glon
        out['lat'][hpx] = glat

    return names, out.view(np.ndarray)
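# Hypothetical usage sketch for parse_targets above: a whitespace-delimited
# .txt target list with a header row (genfromtxt is called with names=True).
# The file name and values are made up for illustration, and the call assumes
# parse_targets and its coordinate helpers (cel2gal, pix2ang) are importable.
example = """name lon lat radius coord
targ1 120.50 -45.20 0.5 GAL
targ2  83.63  22.01 0.5 CEL
"""
with open('targets_example.txt', 'w') as handle:
    handle.write(example)

names, targets = parse_targets('targets_example.txt')
print(names)              # ['targ1' 'targ2']
print(targets['lon'])     # CEL rows converted to Galactic longitude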
class TestRDF: def test_generateR(self): r_max = 5 for r_min in [0, 0.05, 0.1, 1.0, 3.0]: bins = round((r_max - r_min) / 0.1) dr = (r_max - r_min) / bins # make sure the radius for each bin is generated correctly r_list = np.array([ r_min + dr * (i + 1 / 2) for i in range(bins) if r_min + dr * (i + 1 / 2) < r_max ]) rdf = freud.density.RDF(bins, r_max, r_min=r_min) npt.assert_allclose(rdf.bin_centers, r_list, rtol=1e-4, atol=1e-4) npt.assert_allclose((rdf.bin_edges + dr / 2)[:-1], r_list, rtol=1e-4, atol=1e-4) def test_attribute_access(self): r_max = 10.0 bins = 10 num_points = 100 box_size = r_max * 3.1 box, points = freud.data.make_random_system(box_size, num_points, is2D=True) rdf = freud.density.RDF(r_max=r_max, bins=bins) # Test protected attribute access with pytest.raises(AttributeError): rdf.rdf with pytest.raises(AttributeError): rdf.box with pytest.raises(AttributeError): rdf.n_r rdf.compute((box, points), reset=False) # Test if accessible now rdf.rdf rdf.box rdf.n_r rdf.compute((box, points)) # Test if accessible now rdf.rdf rdf.box rdf.n_r def test_invalid_rdf(self): # Make sure that invalid RDF objects raise errors with pytest.raises(ValueError): freud.density.RDF(r_max=-1, bins=10) with pytest.raises(ValueError): freud.density.RDF(r_max=1, bins=0) with pytest.raises(ValueError): freud.density.RDF(r_max=1, bins=10, r_min=2) def test_random_point(self): r_max = 10.0 bins = 10 num_points = 10000 tolerance = 0.1 box_size = r_max * 3.1 for i, r_min in enumerate([0, 0.05, 0.1, 1.0, 3.0]): box, points = freud.data.make_random_system(box_size, num_points) test_set = util.make_raw_query_nlist_test_set( box, points, points, "ball", r_max, 0, True) for nq, neighbors in test_set: rdf = freud.density.RDF(bins, r_max, r_min) if i < 3: rdf.compute(nq, neighbors=neighbors, reset=False) else: rdf.compute(nq, neighbors=neighbors) assert rdf.box == box correct = np.ones(bins, dtype=np.float32) npt.assert_allclose(rdf.rdf, correct, atol=tolerance) # Numerical integration to compute the running coordination # number will be highly inaccurate, so we can only test up to # a limited precision. Also, since dealing with nonzero r_min # values requires extrapolation, we only test when r_min=0. ndens = points.shape[0] / box.volume dr = (r_max - r_min) / bins bin_boundaries = np.array([ r_min + dr * i for i in range(bins + 1) if r_min + dr * i <= r_max ]) bin_volumes = 4 / 3 * np.pi * np.diff(bin_boundaries**3) avg_counts = rdf.rdf * ndens * bin_volumes npt.assert_allclose(rdf.n_r, np.cumsum(avg_counts), rtol=tolerance) def test_repr(self): rdf = freud.density.RDF(r_max=10, bins=100, r_min=0.5) assert str(rdf) == str(eval(repr(rdf))) def test_repr_png(self): r_max = 10.0 bins = 10 num_points = 10 box_size = r_max * 3.1 box, points = freud.data.make_random_system(box_size, num_points) rdf = freud.density.RDF(bins, r_max) with pytest.raises(AttributeError): rdf.plot() assert rdf._repr_png_() is None rdf.compute((box, points), reset=False) rdf.plot() rdf._repr_png_() plt.close("all") def test_points_ne_query_points(self): r_max = 100.0 bins = 100 box_size = r_max * 5 box = freud.box.Box.square(box_size) rdf = freud.density.RDF(bins, r_max) query_points = [] supposed_RDF = [0] N = 100 # With points closely centered around the origin, # the cumulative average bin counts should be same as # having a single point at the origin. # Also, we can check for whether points are not considered against # each other. 
dr = r_max / bins points = [[dr / 4, 0, 0], [-dr / 4, 0, 0], [0, dr / 4, 0], [0, -dr / 4, 0]] for r in rdf.bin_centers: for k in range(N): query_points.append([ r * np.cos(2 * np.pi * k / N), r * np.sin(2 * np.pi * k / N), 0 ]) supposed_RDF.append(supposed_RDF[-1] + N) supposed_RDF = np.array(supposed_RDF[1:]) test_set = util.make_raw_query_nlist_test_set(box, points, query_points, "ball", r_max, 0, False) for nq, neighbors in test_set: rdf = freud.density.RDF(bins, r_max) rdf.compute(nq, query_points, neighbors=neighbors) npt.assert_allclose(rdf.n_r, supposed_RDF, atol=1e-6) def test_empty_histogram(self): r_max = 0.5 bins = 10 box_size = 5 box = freud.box.Box.cube(box_size) rdf = freud.density.RDF(bins, r_max) points = [[0, 0, 0], [2, 2, 2]] rdf.compute(system=(box, points)) # Test that properties are accessible even though there's no data npt.assert_array_equal(rdf.rdf, np.zeros(bins)) npt.assert_array_equal(rdf.n_r, np.zeros(bins)) @pytest.mark.skipif(NumpyVersion(np.__version__) < "1.15.0", reason="Requires numpy>=1.15.0.") def test_bin_precision(self): # Ensure bin edges are precise bins = 500 r_min = 0 r_max = 50 rdf = freud.density.RDF(bins=bins, r_max=r_max, r_min=r_min) expected_bin_edges = np.histogram_bin_edges(np.array([0], dtype=np.float32), bins=bins, range=[r_min, r_max]) npt.assert_allclose(rdf.bin_edges, expected_bin_edges, atol=1e-6)
def init_fit_frame(self): #frame with the navigation bar for the main plot fit_frame_up = tk.Frame(self.fit_frame) fit_frame_down = tk.LabelFrame(self.fit_frame, relief='groove') fit_frame_up.pack(side=tk.TOP, fill=tk.BOTH) fit_frame_down.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=tk.Y) self.plot_type = tk.IntVar(master=self.fit_frame) self.plot_type.set(1) r_buttons = 'Radial slice', 'Time slice', 'Gradient' for nbutt, butt in enumerate(r_buttons): button = tk.Radiobutton(fit_frame_up, text=butt, variable=self.plot_type, command=self.changed_fit_slice, value=nbutt) button.pack(anchor='w', side=tk.LEFT, pady=2, padx=2) # canvas frame self.fig = Figure(figsize=(10, 10), dpi=75) self.fig.patch.set_facecolor((.93, .93, .93)) self.ax_main = self.fig.add_subplot(111) self.canvasMPL = tkagg.FigureCanvasTkAgg(self.fig, master=fit_frame_down) self.toolbar = NavigationToolbar2Tk(self.canvasMPL, fit_frame_down) def print_figure(filename, **kwargs): #cheat print_figure function to save only the plot without the sliders. if 'bbox_inches' not in kwargs: fig = self.ax_main.figure extent = self.ax_main.get_tightbbox( fig.canvas.renderer).transformed( fig.dpi_scale_trans.inverted()) extent.y1 += .3 extent.x1 += .3 kwargs['bbox_inches'] = extent self.canvas_print_figure(filename, **kwargs) self.canvas_print_figure = self.toolbar.canvas.print_figure self.toolbar.canvas.print_figure = print_figure self.canvasMPL.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1) self.canvasMPL._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1) self.lcfs_line = self.ax_main.axvline(1, ls='--', c='k', visible=False) self.zero_line = self.ax_main.axhline(0, ls='--', c='k', visible=False) hbox1 = tk.Frame(fit_frame_down) hbox1.pack(side=tk.BOTTOM, fill=tk.X) mouse_help = tk.Label(hbox1, text='Mouse: ') mouse_left = tk.Label(hbox1, text='Left (+Ctrl): del point (Channel) ', fg="#900000") mouse_mid = tk.Label(hbox1, text='Mid: re-fit ', fg="#009000") mouse_right = tk.Label(hbox1, text='Right: undelete point ', fg="#000090") mouse_wheel = tk.Label(hbox1, text='Wheel: shift', fg="#905090") for w in (mouse_help, mouse_left, mouse_mid, mouse_right, mouse_wheel): w.pack(side=tk.LEFT) hbox2 = tk.Frame(fit_frame_down) hbox2.pack(side=tk.BOTTOM, fill=tk.X) helv36 = tkinter.font.Font(family='Helvetica', size=10, weight='bold') calc_button = tk.Button(hbox2, text='Fit', bg='red', command=self.calculate, font=helv36) calc_button.pack(side=tk.LEFT) self.playfig = tk.PhotoImage(file=icon_dir + 'play.gif', master=self.fit_frame) self.pausefig = tk.PhotoImage(file=icon_dir + 'pause.gif', master=self.fit_frame) self.forwardfig = tk.PhotoImage(file=icon_dir + 'forward.gif', master=self.fit_frame) self.backwardfig = tk.PhotoImage(file=icon_dir + 'backward.gif', master=self.fit_frame) self.backward_button = tk.Button(hbox2, command=self.Backward, image=self.backwardfig) self.backward_button.pack(side=tk.LEFT) self.play_button = tk.Button(hbox2, command=self.Play, image=self.playfig) self.play_button.pack(side=tk.LEFT) self.forward_button = tk.Button(hbox2, command=self.Forward, image=self.forwardfig) self.forward_button.pack(side=tk.LEFT) self.button_3d = tk.Button(hbox2, command=self.plot3d, text='3D', font=helv36) self.button_3d.pack(side=tk.LEFT) self.stop = True self.ctrl = False self.shift = False def stop_handler(event=None, self=self): self.stop = True self.forward_button.bind("<Button-1>", stop_handler) self.backward_button.bind("<Button-1>", stop_handler) vcmd = hbox2.register(self.isfloat) self.view_step = tk.Entry(hbox2, width=4, 
validate="key", validatecommand=(vcmd, '%P'), justify=tk.CENTER) self.view_step_lbl = tk.Label(hbox2, text='Plot step [ms]') self.view_step.pack(side=tk.RIGHT, padx=10) self.view_step_lbl.pack(side=tk.RIGHT) axcolor = 'lightgoldenrodyellow' self.fig.subplots_adjust(left=.10, bottom=.20, right=.95, top=.95, hspace=.1, wspace=0) from numpy.lib import NumpyVersion kargs = { 'facecolor': axcolor } if NumpyVersion( matplotlib.__version__) > NumpyVersion('2.0.0') else { 'axisbg': axcolor } self.sl_ax_main = self.fig.add_axes([.1, .10, .8, .03], **kargs) self.main_slider = Slider(self.sl_ax_main, '', self.tbeg, self.tend, valinit=self.tbeg) sl_ax = self.fig.add_axes([.1, .03, .35, .03], **kargs) self.sl_eta = Slider(sl_ax, '', 0, 1, valinit=self.options['eta']) sl_ax2 = self.fig.add_axes([.55, .03, .35, .03], **kargs) self.sl_lam = Slider(sl_ax2, '', 0, 1, valinit=self.options['lam']) self.fig.text(.1, .075, 'Time smoothing -->:') self.fig.text(.55, .075, 'Radial smoothing -->:') createToolTip(self.forward_button, 'Go forward by one step') createToolTip(self.backward_button, 'Go backward by one step') createToolTip(self.play_button, 'Go step by step forward, pause by second press') createToolTip( self.view_step, 'Plotting time/radial step, this option influences only the plotting, not fitting!' ) createToolTip(calc_button, 'Calculate the 2d fit of the data') def update_eta(eta): self.options['eta'] = eta stop_handler() def update_lam(lam): stop_handler() self.options['lam'] = lam def update_slider(val): try: if self.plot_type.get() in [1, 2]: self.plt_time = val if self.plot_type.get() in [0]: self.plt_radius = val self.updateMainSlider() self.plot_step() except: print( '!!!!!!!!!!!!!!!!!!!!!main_slider error!!!!!!!!!!!!!!!!!!!!!!!!!!!' ) raise self.main_slider.on_changed(update_slider) self.cid1 = self.fig.canvas.mpl_connect('button_press_event', self.MouseInteraction) self.cid2 = self.fig.canvas.mpl_connect('scroll_event', self.WheelInteraction) self.cid3 = self.fig.canvas.mpl_connect('key_press_event', self.on_key) self.cid4 = self.fig.canvas.mpl_connect('key_release_event', self.off_key) self.cid5 = self.fig.canvas.mpl_connect( 'button_press_event', lambda event: self.fig.canvas._tkcanvas.focus_set()) self.cid6 = self.fig.canvas.mpl_connect('pick_event', self.legend_pick) self.sl_eta.on_changed(update_eta) self.sl_lam.on_changed(update_lam)