Example #1
def test_fcompiler_flags_append_warning(monkeypatch):
    # Test that the warning about append behavior changing in the future
    # is triggered.  Need to use a real compiler instance so that we have
    # non-empty flags to start with (otherwise the "if var and append" check
    # will always be false).
    try:
        with suppress_warnings() as sup:
            sup.record()
            fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
            fc.customize()
    except numpy.distutils.fcompiler.CompilerNotFound:
        pytest.skip("gfortran not found, so can't execute this test")

    # Ensure NPY_DISTUTILS_APPEND_FLAGS not defined
    monkeypatch.delenv('NPY_DISTUTILS_APPEND_FLAGS', raising=False)

    for opt, envvar in customizable_flags:
        new_flag = '-dummy-{}-flag'.format(opt)
        with suppress_warnings() as sup:
            sup.record()
            prev_flags = getattr(fc.flag_vars, opt)

        monkeypatch.setenv(envvar, new_flag)
        with suppress_warnings() as sup:
            sup.record()
            new_flags = getattr(fc.flag_vars, opt)
            if prev_flags:
                # Check that warning was issued
                assert len(sup.log) == 1

        monkeypatch.delenv(envvar)
        assert_(new_flags == [new_flag])
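The pattern above relies on suppress_warnings.record(), which returns a list and also appends every matched warning to sup.log. A minimal, self-contained sketch of that record-then-assert idiom (nothing beyond numpy.testing is assumed):

import warnings
from numpy.testing import suppress_warnings

with suppress_warnings() as sup:
    sup.record(UserWarning)               # record (and suppress) UserWarnings
    warnings.warn("flags changed", UserWarning)
assert len(sup.log) == 1                  # the warning was captured, not shown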
Example #2
def test_suppress_warnings_type():
    # Initial state of module, no warnings
    my_mod = _get_fresh_mod()
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})

    # Test module based warning suppression:
    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    sup = suppress_warnings()
    sup.filter(UserWarning)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # And test repeat works:
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)

    # Without specified modules, don't clear warnings during context
    with suppress_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 1)
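The middle of this test exercises the fact that one suppress_warnings instance is reusable: filters added outside any context are permanent, and the same object can be entered repeatedly. A short sketch of that reuse, assuming only numpy.testing:

import warnings
from numpy.testing import suppress_warnings

sup = suppress_warnings()
sup.filter(UserWarning)        # added outside a context: survives re-entry

for _ in range(2):             # entering the same instance twice is fine
    with sup:
        warnings.warn("quiet", UserWarning)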
Example #3
    def test_result(self):
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            for dt in types:
                a = np.ones((), dtype=dt)[()]
                assert_equal(operator.sub(a, a), 0)
Example #4
    def test_corner(self):
        y = list(linspace(0, 1, 1))
        assert_(y == [0.0], y)
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*safely interpreted as an integer")
            y = list(linspace(0, 1, 2.5))
            assert_(y == [0.0, 1.0])
Example #5
    def test_float_modulus_corner_cases(self):
        # Check remainder magnitude.
        for dt in np.typecodes['Float']:
            b = np.array(1.0, dtype=dt)
            a = np.nextafter(np.array(0.0, dtype=dt), -b)
            rem = self.mod(a, b)
            assert_(rem <= b, 'dt: %s' % dt)
            rem = self.mod(-a, -b)
            assert_(rem >= -b, 'dt: %s' % dt)

        # Check nans, inf
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
            for dt in np.typecodes['Float']:
                fone = np.array(1.0, dtype=dt)
                fzer = np.array(0.0, dtype=dt)
                finf = np.array(np.inf, dtype=dt)
                fnan = np.array(np.nan, dtype=dt)
                rem = self.mod(fone, fzer)
                assert_(np.isnan(rem), 'dt: %s' % dt)
                # MSVC 2008 returns NaN here, so disable the check.
                #rem = self.mod(fone, finf)
                #assert_(rem == fone, 'dt: %s' % dt)
                rem = self.mod(fone, fnan)
                assert_(np.isnan(rem), 'dt: %s' % dt)
                rem = self.mod(finf, fone)
                assert_(np.isnan(rem), 'dt: %s' % dt)
Example #6
    def test_float_special(self):
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            for inf in [np.inf, -np.inf]:
                a = np.array([[inf,  np.nan], [np.nan, np.nan]])
                assert_equal(np.nanmedian(a, axis=0), [inf,  np.nan])
                assert_equal(np.nanmedian(a, axis=1), [inf,  np.nan])
                assert_equal(np.nanmedian(a), inf)

                # minimum fill value check
                a = np.array([[np.nan, np.nan, inf],
                             [np.nan, np.nan, inf]])
                assert_equal(np.nanmedian(a), inf)
                assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf])
                assert_equal(np.nanmedian(a, axis=1), inf)

                # no mask path
                a = np.array([[inf, inf], [inf, inf]])
                assert_equal(np.nanmedian(a, axis=1), inf)

                for i in range(0, 10):
                    for j in range(1, 10):
                        a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
                        assert_equal(np.nanmedian(a), inf)
                        assert_equal(np.nanmedian(a, axis=1), inf)
                        assert_equal(np.nanmedian(a, axis=0),
                                     ([np.nan] * i) + [inf] * j)

                        a = np.array([([np.nan] * i) + ([-inf] * j)] * 2)
                        assert_equal(np.nanmedian(a), -inf)
                        assert_equal(np.nanmedian(a, axis=1), -inf)
                        assert_equal(np.nanmedian(a, axis=0),
                                     ([np.nan] * i) + [-inf] * j)
Example #7
    def test_multiple_percentiles(self):
        perc = [50, 100]
        mat = np.ones((4, 3))
        nan_mat = np.nan * mat
        # For checking consistency in higher dimensional case
        large_mat = np.ones((3, 4, 5))
        large_mat[:, 0:2:4, :] = 0
        large_mat[:, :, 3:] *= 2
        for axis in [None, 0, 1]:
            for keepdim in [False, True]:
                with suppress_warnings() as sup:
                    sup.filter(RuntimeWarning, "All-NaN slice encountered")
                    val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
                    nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
                                               keepdims=keepdim)
                    assert_equal(nan_val.shape, val.shape)

                    val = np.percentile(large_mat, perc, axis=axis,
                                        keepdims=keepdim)
                    nan_val = np.nanpercentile(large_mat, perc, axis=axis,
                                               keepdims=keepdim)
                    assert_equal(nan_val, val)

        megamat = np.ones((3, 4, 5, 6))
        assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
Example #8
    def test_keepdims(self):
        mat = np.eye(3)
        for axis in [None, 0, 1]:
            tgt = np.percentile(mat, 70, axis=axis, out=None,
                                overwrite_input=False)
            res = np.nanpercentile(mat, 70, axis=axis, out=None,
                                   overwrite_input=False)
            assert_(res.ndim == tgt.ndim)

        d = np.ones((3, 5, 7, 11))
        # Randomly set some elements to NaN:
        w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
        w = w.astype(np.intp)
        d[tuple(w)] = np.nan
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            res = np.nanpercentile(d, 90, axis=None, keepdims=True)
            assert_equal(res.shape, (1, 1, 1, 1))
            res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
            assert_equal(res.shape, (1, 1, 7, 11))
            res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
            assert_equal(res.shape, (1, 5, 7, 1))
            res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
            assert_equal(res.shape, (3, 1, 7, 11))
            res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
            assert_equal(res.shape, (1, 1, 1, 1))
            res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
            assert_equal(res.shape, (1, 1, 7, 1))
Example #9
    def test_int_from_infinite_longdouble___int__(self):
        x = np.longdouble(np.inf)
        assert_raises(OverflowError, x.__int__)
        with suppress_warnings() as sup:
            sup.record(np.ComplexWarning)
            x = np.clongdouble(np.inf)
            assert_raises(OverflowError, x.__int__)
            assert_equal(len(sup.log), 1)
Example #10
    def test_slice_decref_getsetslice(self):
        # See gh-10066, a temporary slice object should be discarded.
        # This test is only really interesting on Python 2 since
        # it goes through `__set/getslice__` here and can probably be
        # removed. Use 0:7 to make sure it is never None:7.
        class KeepIndexObject(np.ndarray):
            def __getitem__(self, indx):
                self.indx = indx
                if indx == slice(0, 7):
                    raise ValueError

            def __setitem__(self, indx, val):
                self.indx = indx
                if indx == slice(0, 4):
                    raise ValueError

        k = np.array([1]).view(KeepIndexObject)
        k[0:5]
        assert_equal(k.indx, slice(0, 5))
        assert_equal(sys.getrefcount(k.indx), 2)
        try:
            k[0:7]
            raise AssertionError
        except ValueError:
            # The exception holds a reference to the slice so clear on Py2
            if hasattr(sys, 'exc_clear'):
                with suppress_warnings() as sup:
                    sup.filter(DeprecationWarning)
                    sys.exc_clear()
        assert_equal(k.indx, slice(0, 7))
        assert_equal(sys.getrefcount(k.indx), 2)

        k[0:3] = 6
        assert_equal(k.indx, slice(0, 3))
        assert_equal(sys.getrefcount(k.indx), 2)
        try:
            k[0:4] = 2
            raise AssertionError
        except ValueError:
            # The exception holds a reference to the slice so clear on Py2
            if hasattr(sys, 'exc_clear'):
                with suppress_warnings() as sup:
                    sup.filter(DeprecationWarning)
                    sys.exc_clear()
        assert_equal(k.indx, slice(0, 4))
        assert_equal(sys.getrefcount(k.indx), 2)
Example #11
def test_suppress_warnings_module():
    # Initial state of module, no warnings
    my_mod = _get_fresh_mod()
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})

    def warn_other_module():
        # Apply along axis is implemented in python; stacklevel=2 means
        # we end up inside its module, not ours.
        def warn(arr):
            warnings.warn("Some warning 2", stacklevel=2)
            return arr
        np.apply_along_axis(warn, 0, [0])

    # Test module based warning suppression:
    assert_warn_len_equal(my_mod, 0)
    with suppress_warnings() as sup:
        sup.record(UserWarning)
        # suppress warning from the other module (may have .pyc ending);
        # if apply_along_axis is moved, this has to be changed.
        sup.filter(module=np.lib.shape_base)
        warnings.warn("Some warning")
        warn_other_module()
    # Check that the suppression did test the file correctly (this module
    # got filtered)
    assert_equal(len(sup.log), 1)
    assert_equal(sup.log[0].message.args[0], "Some warning")
    assert_warn_len_equal(my_mod, 0, py37=0)
    sup = suppress_warnings()
    # Will have to be changed if apply_along_axis is moved:
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # And test repeat works:
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)

    # Without specified modules, don't clear warnings during context
    # Python 3.7 does not add ignored warnings.
    with suppress_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 1, py37=0)
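For reference, a condensed sketch of the module-based suppression used above; the module handed to filter() determines whose warnings are silenced (sys.modules[__name__] is just an illustrative choice here, not something the test itself uses):

import sys
import warnings
from numpy.testing import suppress_warnings

sup = suppress_warnings()
sup.filter(module=sys.modules[__name__])   # only warnings raised from this module
with sup:
    warnings.warn("local warning")         # suppressed; other modules unaffected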
Example #12
    def test_longdouble_int(self):
        # gh-627
        x = np.longdouble(np.inf)
        assert_raises(OverflowError, int, x)
        with suppress_warnings() as sup:
            sup.record(np.ComplexWarning)
            x = np.clongdouble(np.inf)
            assert_raises(OverflowError, int, x)
            assert_equal(len(sup.log), 1)
Example #13
    def test_decode(self):
        if sys.version_info[0] >= 3:
            A = np.char.array([b'\\u03a3'])
            assert_(A.decode('unicode-escape')[0] == '\u03a3')
        else:
            with suppress_warnings() as sup:
                if sys.py3kwarning:
                    sup.filter(DeprecationWarning, "'hex_codec'")
                A = np.char.array(['736563726574206d657373616765'])
                assert_(A.decode('hex_codec')[0] == 'secret message')
Example #14
    def test_ddof_corrcoef(self):
        # See gh-3336
        x = np.ma.masked_equal([1, 2, 3, 4, 5], 4)
        y = np.array([2, 2.5, 3.1, 3, 5])
        # this test can be removed after deprecation.
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, "bias and ddof have no effect")
            r0 = np.ma.corrcoef(x, y, ddof=0)
            r1 = np.ma.corrcoef(x, y, ddof=1)
            # ddof should not have an effect (it gets cancelled out)
            assert_allclose(r0.data, r1.data)
Example #15
    def test_result_values(self):
        for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
            for row in _ndat:
                with suppress_warnings() as sup:
                    sup.filter(RuntimeWarning, "invalid value encountered in")
                    ind = f(row)
                    val = row[ind]
                    # comparing with NaN is tricky as the result
                    # is always false except for NaN != NaN
                    assert_(not np.isnan(val))
                    assert_(not fcmp(val, row).any())
                    assert_(not np.equal(val, row[:ind]).any())
Example #16
def test_suppress_warnings_decorate_no_record():
    sup = suppress_warnings()
    sup.filter(UserWarning)

    @sup
    def warn(category):
        warnings.warn('Some warning', category)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        warn(UserWarning)  # should be suppressed
        warn(RuntimeWarning)
        assert_(len(w) == 1)
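Because suppress_warnings instances are callable, they double as decorators, which is what @sup does above. A minimal standalone sketch:

import warnings
from numpy.testing import suppress_warnings

sup = suppress_warnings()
sup.filter(UserWarning)

@sup                      # the whole function body runs inside the context
def noisy():
    warnings.warn("hidden", UserWarning)

noisy()                   # emits nothing; a RuntimeWarning would still escape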
Example #17
def test_suppress_warnings_record():
    sup = suppress_warnings()
    log1 = sup.record()

    with sup:
        log2 = sup.record(message='Some other warning 2')
        sup.filter(message='Some warning')
        warnings.warn('Some warning')
        warnings.warn('Some other warning')
        warnings.warn('Some other warning 2')

        assert_(len(sup.log) == 2)
        assert_(len(log1) == 1)
        assert_(len(log2) == 1)
        assert_(log2[0].message.args[0] == 'Some other warning 2')

    # Do it again, with the same context to see if some warnings survived:
    with sup:
        log2 = sup.record(message='Some other warning 2')
        sup.filter(message='Some warning')
        warnings.warn('Some warning')
        warnings.warn('Some other warning')
        warnings.warn('Some other warning 2')

        assert_(len(sup.log) == 2)
        assert_(len(log1) == 1)
        assert_(len(log2) == 1)
        assert_(log2[0].message.args[0] == 'Some other warning 2')

    # Test nested:
    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings() as sup2:
            sup2.record(message='Some warning')
            warnings.warn('Some warning')
            warnings.warn('Some other warning')
            assert_(len(sup2.log) == 1)
        assert_(len(sup.log) == 1)
Example #18
    def test_normed(self):
        sup = suppress_warnings()
        with sup:
            rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
            # Check that the integral of the density equals 1.
            n = 100
            v = np.random.rand(n)
            a, b = histogram(v, normed=True)
            area = np.sum(a * np.diff(b))
            assert_almost_equal(area, 1)
            assert_equal(len(rec), 1)

        sup = suppress_warnings()
        with sup:
            rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
            # Check with non-constant bin widths (buggy but backwards
            # compatible)
            v = np.arange(10)
            bins = [0, 1, 5, 9, 10]
            a, b = histogram(v, bins, normed=True)
            area = np.sum(a * np.diff(b))
            assert_almost_equal(area, 1)
            assert_equal(len(rec), 1)
Example #19
def test_suppress_warnings_forwarding():
    def warn_other_module():
        # Apply along axis is implemented in python; stacklevel=2 means
        # we end up inside its module, not ours.
        def warn(arr):
            warnings.warn("Some warning", stacklevel=2)
            return arr

        np.apply_along_axis(warn, 0, [0])

    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("always"):
            for i in range(2):
                warnings.warn("Some warning")

        assert_(len(sup.log) == 2)

    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("location"):
            for i in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some warning")

        assert_(len(sup.log) == 2)

    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("module"):
            for i in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some warning")
                warn_other_module()

        assert_(len(sup.log) == 2)

    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("once"):
            for i in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some other warning")
                warn_other_module()

        assert_(len(sup.log) == 2)
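The constructor argument exercised here is the forwarding rule ("always", "location", "module", or "once"), which controls how an inner suppress_warnings context forwards the warnings it caught to an outer recording context. A hedged sketch of the "once" case, consistent with the assertions above (identical messages are forwarded only once, regardless of where they were raised):

import warnings
from numpy.testing import suppress_warnings

with suppress_warnings() as outer:
    outer.record()
    with suppress_warnings("once"):
        warnings.warn("dup")
        warnings.warn("dup")       # identical warning: forwarded only once
assert len(outer.log) == 1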
Example #20
    def test_float_special(self):
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            a = np.array([[np.inf,  np.nan], [np.nan, np.nan]])
            assert_equal(np.nanmedian(a, axis=0), [np.inf,  np.nan])
            assert_equal(np.nanmedian(a, axis=1), [np.inf,  np.nan])
            assert_equal(np.nanmedian(a), np.inf)

            # minimum fill value check
            a = np.array([[np.nan, np.nan, np.inf], [np.nan, np.nan, np.inf]])
            assert_equal(np.nanmedian(a, axis=1), np.inf)

            # no mask path
            a = np.array([[np.inf, np.inf], [np.inf, np.inf]])
            assert_equal(np.nanmedian(a, axis=1), np.inf)
Example #21
    def test_dtype_from_char(self):
        mat = np.eye(3)
        codes = 'efdgFDG'
        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
            for c in codes:
                with suppress_warnings() as sup:
                    if nf in {np.nanstd, np.nanvar} and c in 'FDG':
                        # Giving the warning is a small bug, see gh-8000
                        sup.filter(np.ComplexWarning)
                    tgt = rf(mat, dtype=c, axis=1).dtype.type
                    res = nf(mat, dtype=c, axis=1).dtype.type
                    assert_(res is tgt)
                    # scalar case
                    tgt = rf(mat, dtype=c, axis=None).dtype.type
                    res = nf(mat, dtype=c, axis=None).dtype.type
                    assert_(res is tgt)
Example #22
    def test_ddof_too_big(self):
        nanfuncs = [np.nanvar, np.nanstd]
        stdfuncs = [np.var, np.std]
        dsize = [len(d) for d in _rdat]
        for nf, rf in zip(nanfuncs, stdfuncs):
            for ddof in range(5):
                with suppress_warnings() as sup:
                    sup.record(RuntimeWarning)
                    sup.filter(np.ComplexWarning)
                    tgt = [ddof >= d for d in dsize]
                    res = nf(_ndat, axis=1, ddof=ddof)
                    assert_equal(np.isnan(res), tgt)
                    if any(tgt):
                        assert_(len(sup.log) == 1)
                    else:
                        assert_(len(sup.log) == 0)
Example #23
    def test_bool_conversion(self):
        # gh-12107
        # Reference integer histogram
        a = np.array([1, 1, 0], dtype=np.uint8)
        int_hist, int_edges = np.histogram(a)

        # Should raise a warning on booleans.
        # Ensure that the histograms are equivalent; we need to suppress
        # the warnings to get the actual outputs.
        with suppress_warnings() as sup:
            rec = sup.record(RuntimeWarning, 'Converting input from .*')
            hist, edges = np.histogram([True, True, False])
            # A warning should be issued
            assert_equal(len(rec), 1)
            assert_array_equal(hist, int_hist)
            assert_array_equal(edges, int_edges)
Example #24
    def test_recarrays(self):
        """Test record arrays."""
        a = np.empty(2, [('floupi', float), ('floupa', float)])
        a['floupi'] = [1, 2]
        a['floupa'] = [1, 2]
        b = a.copy()

        self._test_equal(a, b)

        c = np.empty(2, [('floupipi', float), ('floupa', float)])
        c['floupipi'] = a['floupi'].copy()
        c['floupa'] = a['floupa'].copy()

        with suppress_warnings() as sup:
            l = sup.record(FutureWarning, message="elementwise == ")
            self._test_not_equal(c, b)
            assert_(len(l) == 1)
Example #25
    def test_allnans(self):
        mat = np.array([np.nan]*9).reshape(3, 3)
        for axis in [None, 0, 1]:
            with suppress_warnings() as sup:
                sup.record(RuntimeWarning)

                assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
                if axis is None:
                    assert_(len(sup.log) == 1)
                else:
                    assert_(len(sup.log) == 3)
                # Check scalar
                assert_(np.isnan(np.nanmedian(np.nan)))
                if axis is None:
                    assert_(len(sup.log) == 2)
                else:
                    assert_(len(sup.log) == 4)
Example #26
    def test_py2_float_print(self):
        # gh-10753
        # In python2, the python float type implements an obsolete method
        # tp_print, which overrides tp_repr and tp_str when using the "print"
        # keyword/method to output to a "real file" (i.e., not a StringIO). Make
        # sure we don't inherit it.
        x = np.double(0.1999999999999)
        with TemporaryFile('r+t') as f:
            print(x, file=f)
            f.seek(0)
            output = f.read()
        assert_equal(output, str(x) + '\n')
        # In python2 the value float('0.1999999999999') prints with reduced
        # precision as '0.2', but we want numpy's np.double('0.1999999999999')
        # to print the unique value, '0.1999999999999'.

        # gh-11031
        # Only in the python2 interactive shell and when stdout is a "real"
        # file, the output of the last command is printed to stdout without
        # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
        # x` are potentially different. Make sure they are the same. The only
        # way I found to get prompt-like output is using an actual prompt from
        # the 'code' module. Again, must use tempfile to get a "real" file.

        # dummy user-input which enters one line and then ctrl-Ds.
        def userinput():
            yield 'np.sqrt(2)'
            raise EOFError
        gen = userinput()
        input_func = lambda prompt="": next(gen)

        with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
            orig_stdout, orig_stderr = sys.stdout, sys.stderr
            sys.stdout, sys.stderr = fo, fe

            # py2 code.interact sends irrelevant internal DeprecationWarnings
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning)
                code.interact(local={'np': np}, readfunc=input_func, banner='')

            sys.stdout, sys.stderr = orig_stdout, orig_stderr

            fo.seek(0)
            capture = fo.read().strip()

        assert_equal(capture, repr(np.sqrt(2)))
Example #27
    def test_float_special(self):
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            for inf in [np.inf, -np.inf]:
                a = np.array([[inf,  np.nan], [np.nan, np.nan]])
                assert_equal(np.nanmedian(a, axis=0), [inf,  np.nan])
                assert_equal(np.nanmedian(a, axis=1), [inf,  np.nan])
                assert_equal(np.nanmedian(a), inf)

                # minimum fill value check
                a = np.array([[np.nan, np.nan, inf],
                             [np.nan, np.nan, inf]])
                assert_equal(np.nanmedian(a), inf)
                assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf])
                assert_equal(np.nanmedian(a, axis=1), inf)

                # no mask path
                a = np.array([[inf, inf], [inf, inf]])
                assert_equal(np.nanmedian(a, axis=1), inf)

                a = np.array([[inf, 7, -inf, -9],
                              [-10, np.nan, np.nan, 5],
                              [4, np.nan, np.nan, inf]],
                              dtype=np.float32)
                if inf > 0:
                    assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.])
                    assert_equal(np.nanmedian(a), 4.5)
                else:
                    assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.])
                    assert_equal(np.nanmedian(a), -2.5)
                assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf])

                for i in range(0, 10):
                    for j in range(1, 10):
                        a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
                        assert_equal(np.nanmedian(a), inf)
                        assert_equal(np.nanmedian(a, axis=1), inf)
                        assert_equal(np.nanmedian(a, axis=0),
                                     ([np.nan] * i) + [inf] * j)

                        a = np.array([([np.nan] * i) + ([-inf] * j)] * 2)
                        assert_equal(np.nanmedian(a), -inf)
                        assert_equal(np.nanmedian(a, axis=1), -inf)
                        assert_equal(np.nanmedian(a, axis=0),
                                     ([np.nan] * i) + [-inf] * j)
Example #28
    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        with suppress_warnings() as sup:
            sup.filter(
                np.ma.core.MaskedArrayFutureWarning,
                "setting an item on a masked array which has a "
                "shared mask will not copy")

            n = [0, 0, 1, 0, 0]
            m = make_mask(n)
            m2 = make_mask(m)
            self.assertTrue(m is m2)
            m3 = make_mask(m, copy=1)
            self.assertTrue(m is not m3)

            x1 = np.arange(5)
            y1 = array(x1, mask=m)
            self.assertTrue(y1._data is not x1)
            self.assertTrue(allequal(x1, y1._data))
            self.assertTrue(y1.mask is m)

            y1a = array(y1, copy=0)
            self.assertTrue(y1a.mask is y1.mask)

            y2 = array(x1, mask=m, copy=0)
            self.assertTrue(y2.mask is m)
            self.assertTrue(y2[2] is masked)
            y2[2] = 9
            self.assertTrue(y2[2] is not masked)
            self.assertTrue(y2.mask is not m)
            self.assertTrue(allequal(y2.mask, 0))

            y3 = array(x1 * 1.0, mask=m)
            self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)

            x4 = arange(4)
            x4[2] = masked
            y4 = resize(x4, (8,))
            self.assertTrue(eq(concatenate([x4, x4]), y4))
            self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
            y5 = repeat(x4, (2, 2, 2, 2), axis=0)
            self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
            y6 = repeat(x4, 2, axis=0)
            self.assertTrue(eq(y5, y6))
Example #29
def assert_equal(actual, desired, err_msg=''):
    """
    Asserts that two items are equal.

    """
    # Case #1: dictionary .....
    if isinstance(desired, dict):
        if not isinstance(actual, dict):
            raise AssertionError(repr(type(actual)))
        assert_equal(len(actual), len(desired), err_msg)
        for k, i in desired.items():
            if k not in actual:
                raise AssertionError("%s not in %s" % (k, actual))
            assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg))
        return
    # Case #2: lists .....
    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
        return _assert_equal_on_sequences(actual, desired, err_msg='')
    if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)):
        msg = build_err_msg([actual, desired], err_msg,)
        with suppress_warnings() as sup:
            sup.filter(FutureWarning, ".*NAT ==")
            if not desired == actual:
                raise AssertionError(msg)
        return
    # Case #4. arrays or equivalent
    if ((actual is masked) and not (desired is masked)) or \
            ((desired is masked) and not (actual is masked)):
        msg = build_err_msg([actual, desired],
                            err_msg, header='', names=('x', 'y'))
        raise ValueError(msg)
    actual = np.array(actual, copy=False, subok=True)
    desired = np.array(desired, copy=False, subok=True)
    (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype)
    if actual_dtype.char == "S" and desired_dtype.char == "S":
        return _assert_equal_on_sequences(actual.tolist(),
                                          desired.tolist(),
                                          err_msg='')
    return assert_array_equal(actual, desired, err_msg)
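As a usage note, this dispatching assert_equal (apparently numpy.ma.testutils', given the masked-value checks) handles dicts, sequences, and arrays; a short hedged sketch of calls each branch would accept:

import numpy as np
from numpy.ma.testutils import assert_equal

assert_equal({'a': 1, 'b': 2}, {'a': 1, 'b': 2})  # Case 1: compared key by key
assert_equal([1, 2, 3], (1, 2, 3))                # Case 2: elementwise sequences
a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
assert_equal(a, a)                                # Case 4: assert_array_equal path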
Example #30
    def test_ufunc_return_ndarray(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        fp[:] = self.data

        with suppress_warnings() as sup:
            sup.filter(FutureWarning, "np.average currently does not preserve")
            for unary_op in [sum, average, product]:
                result = unary_op(fp)
                assert_(isscalar(result))
                assert_(result.__class__ is self.data[0, 0].__class__)

                assert_(unary_op(fp, axis=0).__class__ is ndarray)
                assert_(unary_op(fp, axis=1).__class__ is ndarray)

        for binary_op in [add, subtract, multiply]:
            assert_(binary_op(fp, self.data).__class__ is ndarray)
            assert_(binary_op(self.data, fp).__class__ is ndarray)
            assert_(binary_op(fp, fp).__class__ is ndarray)

        fp += 1
        assert(fp.__class__ is memmap)
        add(fp, 1, out=fp)
        assert(fp.__class__ is memmap)
Example #31
    def test_basic(self, dist, mv_ex):
        with suppress_warnings() as sup:
            # filter the warnings thrown by UNU.RAN
            sup.filter(RuntimeWarning)
            rng = TransformedDensityRejection(dist, random_state=42)
        check_cont_samples(rng, dist, mv_ex)
Example #32
    def test_half_ufuncs(self):
        """Test the various ufuncs"""

        a = np.array([0, 1, 2, 4, 2], dtype=float16)
        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

        assert_equal(np.equal(a, b), [False, False, False, True, False])
        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
        assert_equal(np.less(a, b), [False, True, False, False, True])
        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
        assert_equal(np.greater(a, b), [True, False, True, False, False])
        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
        assert_equal(np.logical_not(a), [True, False, False, False, False])

        assert_equal(np.isnan(c), [False, False, False, True, False])
        assert_equal(np.isinf(c), [False, False, True, False, False])
        assert_equal(np.isfinite(c), [True, True, False, False, True])
        assert_equal(np.signbit(b), [True, False, False, False, False])

        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
        with suppress_warnings() as sup:
            sup.record(RuntimeWarning)
            x = np.maximum(b, c)
            assert_(np.isnan(x[3]))
        assert_equal(len(sup.log), 1)
        x[3] = 0
        assert_equal(x, [0, 5, 1, 0, 6])
        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
        with suppress_warnings() as sup:
            sup.record(RuntimeWarning)
            x = np.minimum(b, c)
            assert_(np.isnan(x[3]))
        assert_equal(len(sup.log), 1)
        x[3] = 0
        assert_equal(x, [-2, -1, -np.inf, 0, 3])
        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
        assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
        assert_equal(np.square(b), [4, 25, 1, 16, 9])
        assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
        assert_equal(np.conjugate(b), b)
        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
        assert_equal(np.positive(b), b)
        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
        assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
Example #33
def test_exact_values():
    # Check that updating stored values with exact ones worked.
    with suppress_warnings() as sup:
        sup.filter(ConstantWarning)
        for key in _cd.exact_values:
            assert_((_cd.exact_values[key][0] - value(key)) / value(key) == 0)
Example #34
    def test_callback_type(self):
        # The legacy callback type changes meaning of 'maxiter'
        np.random.seed(1)
        A = np.random.rand(20, 20)
        b = np.random.rand(20)

        cb_count = [0]

        def pr_norm_cb(r):
            cb_count[0] += 1
            assert_(isinstance(r, float))

        def x_cb(x):
            cb_count[0] += 1
            assert_(isinstance(x, np.ndarray))

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            # 2 iterations is not enough to solve the problem
            cb_count = [0]
            x, info = gmres(A,
                            b,
                            tol=1e-6,
                            atol=0,
                            callback=pr_norm_cb,
                            maxiter=2,
                            restart=50)
            assert info == 2
            assert cb_count[0] == 2

        # With `callback_type` specified, no warning should be raised
        cb_count = [0]
        x, info = gmres(A,
                        b,
                        tol=1e-6,
                        atol=0,
                        callback=pr_norm_cb,
                        maxiter=2,
                        restart=50,
                        callback_type='legacy')
        assert info == 2
        assert cb_count[0] == 2

        # 2 restart cycles is enough to solve the problem
        cb_count = [0]
        x, info = gmres(A,
                        b,
                        tol=1e-6,
                        atol=0,
                        callback=pr_norm_cb,
                        maxiter=2,
                        restart=50,
                        callback_type='pr_norm')
        assert info == 0
        assert cb_count[0] > 2

        # 2 restart cycles is enough to solve the problem
        cb_count = [0]
        x, info = gmres(A,
                        b,
                        tol=1e-6,
                        atol=0,
                        callback=x_cb,
                        maxiter=2,
                        restart=50,
                        callback_type='x')
        assert info == 0
        assert cb_count[0] == 2
Example #35
    def check_einsum_sums(self, dtype, do_opt=False):
        # Check various sums.  Uses many sizes to exercise unrolled loops.

        # sum(a, axis=-1)
        for n in range(1, 17):
            a = np.arange(n, dtype=dtype)
            assert_equal(np.einsum("i->", a, optimize=do_opt),
                         np.sum(a, axis=-1).astype(dtype))
            assert_equal(np.einsum(a, [0], [], optimize=do_opt),
                         np.sum(a, axis=-1).astype(dtype))

        for n in range(1, 17):
            a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
            assert_equal(np.einsum("...i->...", a, optimize=do_opt),
                         np.sum(a, axis=-1).astype(dtype))
            assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt),
                         np.sum(a, axis=-1).astype(dtype))

        # sum(a, axis=0)
        for n in range(1, 17):
            a = np.arange(2*n, dtype=dtype).reshape(2, n)
            assert_equal(np.einsum("i...->...", a, optimize=do_opt),
                         np.sum(a, axis=0).astype(dtype))
            assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
                         np.sum(a, axis=0).astype(dtype))

        for n in range(1, 17):
            a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
            assert_equal(np.einsum("i...->...", a, optimize=do_opt),
                         np.sum(a, axis=0).astype(dtype))
            assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
                         np.sum(a, axis=0).astype(dtype))

        # trace(a)
        for n in range(1, 17):
            a = np.arange(n*n, dtype=dtype).reshape(n, n)
            assert_equal(np.einsum("ii", a, optimize=do_opt),
                         np.trace(a).astype(dtype))
            assert_equal(np.einsum(a, [0, 0], optimize=do_opt),
                         np.trace(a).astype(dtype))

        # multiply(a, b)
        assert_equal(np.einsum("..., ...", 3, 4), 12)  # scalar case
        for n in range(1, 17):
            a = np.arange(3 * n, dtype=dtype).reshape(3, n)
            b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
            assert_equal(np.einsum("..., ...", a, b, optimize=do_opt),
                         np.multiply(a, b))
            assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt),
                         np.multiply(a, b))

        # inner(a,b)
        for n in range(1, 17):
            a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
            b = np.arange(n, dtype=dtype)
            assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b))
            assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt),
                         np.inner(a, b))

        for n in range(1, 11):
            a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2)
            b = np.arange(n, dtype=dtype)
            assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt),
                         np.inner(a.T, b.T).T)
            assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt),
                         np.inner(a.T, b.T).T)

        # outer(a,b)
        for n in range(1, 17):
            a = np.arange(3, dtype=dtype)+1
            b = np.arange(n, dtype=dtype)+1
            assert_equal(np.einsum("i,j", a, b, optimize=do_opt),
                         np.outer(a, b))
            assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt),
                         np.outer(a, b))

        # Suppress the complex warnings for the 'as f8' tests
        with suppress_warnings() as sup:
            sup.filter(np.ComplexWarning)

            # matvec(a,b) / a.dot(b) where a is matrix, b is vector
            for n in range(1, 17):
                a = np.arange(4*n, dtype=dtype).reshape(4, n)
                b = np.arange(n, dtype=dtype)
                assert_equal(np.einsum("ij, j", a, b, optimize=do_opt),
                             np.dot(a, b))
                assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt),
                             np.dot(a, b))

                c = np.arange(4, dtype=dtype)
                np.einsum("ij,j", a, b, out=c,
                          dtype='f8', casting='unsafe', optimize=do_opt)
                assert_equal(c,
                             np.dot(a.astype('f8'),
                                    b.astype('f8')).astype(dtype))
                c[...] = 0
                np.einsum(a, [0, 1], b, [1], out=c,
                          dtype='f8', casting='unsafe', optimize=do_opt)
                assert_equal(c,
                             np.dot(a.astype('f8'),
                                    b.astype('f8')).astype(dtype))

            for n in range(1, 17):
                a = np.arange(4*n, dtype=dtype).reshape(4, n)
                b = np.arange(n, dtype=dtype)
                assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt),
                             np.dot(b.T, a.T))
                assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt),
                             np.dot(b.T, a.T))

                c = np.arange(4, dtype=dtype)
                np.einsum("ji,j", a.T, b.T, out=c,
                          dtype='f8', casting='unsafe', optimize=do_opt)
                assert_equal(c,
                             np.dot(b.T.astype('f8'),
                                    a.T.astype('f8')).astype(dtype))
                c[...] = 0
                np.einsum(a.T, [1, 0], b.T, [1], out=c,
                          dtype='f8', casting='unsafe', optimize=do_opt)
                assert_equal(c,
                             np.dot(b.T.astype('f8'),
                                    a.T.astype('f8')).astype(dtype))

            # matmat(a,b) / a.dot(b) where a is matrix, b is matrix
            for n in range(1, 17):
                if n < 8 or dtype != 'f2':
                    a = np.arange(4*n, dtype=dtype).reshape(4, n)
                    b = np.arange(n*6, dtype=dtype).reshape(n, 6)
                    assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt),
                                 np.dot(a, b))
                    assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt),
                                 np.dot(a, b))

            for n in range(1, 17):
                a = np.arange(4*n, dtype=dtype).reshape(4, n)
                b = np.arange(n*6, dtype=dtype).reshape(n, 6)
                c = np.arange(24, dtype=dtype).reshape(4, 6)
                np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe',
                          optimize=do_opt)
                assert_equal(c,
                             np.dot(a.astype('f8'),
                                    b.astype('f8')).astype(dtype))
                c[...] = 0
                np.einsum(a, [0, 1], b, [1, 2], out=c,
                          dtype='f8', casting='unsafe', optimize=do_opt)
                assert_equal(c,
                             np.dot(a.astype('f8'),
                                    b.astype('f8')).astype(dtype))

            # matrix triple product (note this is not currently an efficient
            # way to multiply 3 matrices)
            a = np.arange(12, dtype=dtype).reshape(3, 4)
            b = np.arange(20, dtype=dtype).reshape(4, 5)
            c = np.arange(30, dtype=dtype).reshape(5, 6)
            if dtype != 'f2':
                assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt),
                             a.dot(b).dot(c))
                assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3],
                                       optimize=do_opt), a.dot(b).dot(c))

            d = np.arange(18, dtype=dtype).reshape(3, 6)
            np.einsum("ij,jk,kl", a, b, c, out=d,
                      dtype='f8', casting='unsafe', optimize=do_opt)
            tgt = a.astype('f8').dot(b.astype('f8'))
            tgt = tgt.dot(c.astype('f8')).astype(dtype)
            assert_equal(d, tgt)

            d[...] = 0
            np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
                      dtype='f8', casting='unsafe', optimize=do_opt)
            tgt = a.astype('f8').dot(b.astype('f8'))
            tgt = tgt.dot(c.astype('f8')).astype(dtype)
            assert_equal(d, tgt)

            # tensordot(a, b)
            if np.dtype(dtype) != np.dtype('f2'):
                a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
                b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
                assert_equal(np.einsum("ijk, jil -> kl", a, b),
                             np.tensordot(a, b, axes=([1, 0], [0, 1])))
                assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
                             np.tensordot(a, b, axes=([1, 0], [0, 1])))

                c = np.arange(10, dtype=dtype).reshape(5, 2)
                np.einsum("ijk,jil->kl", a, b, out=c,
                          dtype='f8', casting='unsafe', optimize=do_opt)
                assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
                             axes=([1, 0], [0, 1])).astype(dtype))
                c[...] = 0
                np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
                          dtype='f8', casting='unsafe', optimize=do_opt)
                assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
                             axes=([1, 0], [0, 1])).astype(dtype))

        # logical_and(logical_and(a!=0, b!=0), c!=0)
        a = np.array([1,   3,   -2,   0,   12,  13,   0,   1], dtype=dtype)
        b = np.array([0,   3.5, 0.,   -2,  0,   1,    3,   12], dtype=dtype)
        c = np.array([True, True, False, True, True, False, True, True])
        assert_equal(np.einsum("i,i,i->i", a, b, c,
                     dtype='?', casting='unsafe', optimize=do_opt),
                     np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
        assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
                     dtype='?', casting='unsafe'),
                     np.logical_and(np.logical_and(a != 0, b != 0), c != 0))

        a = np.arange(9, dtype=dtype)
        assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
        assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
        assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
        assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))

        # Various stride0, contiguous, and SSE aligned variants
        for n in range(1, 25):
            a = np.arange(n, dtype=dtype)
            if np.dtype(dtype).itemsize > 1:
                assert_equal(np.einsum("...,...", a, a, optimize=do_opt),
                             np.multiply(a, a))
                assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a))
                assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a)
                assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a)
                assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a))
                assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a))

                assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt),
                             np.multiply(a[1:], a[:-1]))
                assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt),
                             np.dot(a[1:], a[:-1]))
                assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:])
                assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:])
                assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt),
                             2*np.sum(a[1:]))
                assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt),
                             2*np.sum(a[1:]))

        # An object array, summed as the data type
        a = np.arange(9, dtype=object)

        b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
        assert_equal(b, np.sum(a))
        assert_equal(b.dtype, np.dtype(dtype))

        b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
        assert_equal(b, np.sum(a))
        assert_equal(b.dtype, np.dtype(dtype))

        # A case which was failing (ticket #1885)
        p = np.arange(2) + 1
        q = np.arange(4).reshape(2, 2) + 3
        r = np.arange(4).reshape(2, 2) + 7
        assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)

        # singleton dimensions broadcast (gh-10343)
        p = np.ones((10,2))
        q = np.ones((1,2))
        assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
                           np.einsum('ij,ij->j', p, q, optimize=False))
        assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
                           [10.] * 2)

        # a blas-compatible contraction broadcasting case which was failing
        # for optimize=True (ticket #10930)
        x = np.array([2., 3.])
        y = np.array([4.])
        assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.)
        assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.)

        # all-ones array was bypassing the bug (ticket #10930)
        p = np.ones((1, 5)) / 2
        q = np.ones((5, 5)) / 2
        for optimize in (True, False):
            assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
                                         optimize=optimize),
                               np.einsum("...ij,...jk->...ik", p, q,
                                         optimize=optimize))
            assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
                                         optimize=optimize),
                               np.full((1, 5), 1.25))

        # Cases which were failing (gh-10899)
        x = np.eye(2, dtype=dtype)
        y = np.ones(2, dtype=dtype)
        assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
                           [2.])  # contig_contig_outstride0_two
        assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
                           [2.])  # stride0_contig_outstride0_two
        assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
                           [2.])  # contig_stride0_outstride0_two
Example #36
def test_read_write_files():
    # test round trip for example file
    cwd = os.getcwd()
    try:
        tmpdir = tempfile.mkdtemp()
        os.chdir(tmpdir)
        with make_simple('simple.nc', 'w') as f:
            pass
        # read the file we just created in 'a' mode
        with netcdf_file('simple.nc', 'a') as f:
            check_simple(f)
            # add something
            f._attributes['appendRan'] = 1

        # To read the NetCDF file we just created:
        with netcdf_file('simple.nc') as f:
            # Using mmap is the default (but not on pypy)
            assert_equal(f.use_mmap, not IS_PYPY)
            check_simple(f)
            assert_equal(f._attributes['appendRan'], 1)

        # Read it in append (and check mmap is off)
        with netcdf_file('simple.nc', 'a') as f:
            assert_(not f.use_mmap)
            check_simple(f)
            assert_equal(f._attributes['appendRan'], 1)

        # Now without mmap
        with netcdf_file('simple.nc', mmap=False) as f:
            # Using mmap is the default
            assert_(not f.use_mmap)
            check_simple(f)

        # To read the NetCDF file we just created, as file object, no
        # mmap.  When n * n_bytes(var_type) is not divisible by 4, this
        # raised an error in pupynere 1.0.12 and scipy rev 5893, because
        # calculated vsize was rounding up in units of 4 - see
        # https://www.unidata.ucar.edu/software/netcdf/guide_toc.html
        with open('simple.nc', 'rb') as fobj:
            with netcdf_file(fobj) as f:
                # by default, don't use mmap for file-like
                assert_(not f.use_mmap)
                check_simple(f)

        # Read file from fileobj, with mmap
        with suppress_warnings() as sup:
            if IS_PYPY:
                sup.filter(RuntimeWarning,
                           "Cannot close a netcdf_file opened with mmap=True.*")
            with open('simple.nc', 'rb') as fobj:
                with netcdf_file(fobj, mmap=True) as f:
                    assert_(f.use_mmap)
                    check_simple(f)

        # Again read it in append mode (adding another att)
        with open('simple.nc', 'r+b') as fobj:
            with netcdf_file(fobj, 'a') as f:
                assert_(not f.use_mmap)
                check_simple(f)
                f.createDimension('app_dim', 1)
                var = f.createVariable('app_var', 'i', ('app_dim',))
                var[:] = 42

        # And... check that app_var made it in...
        with netcdf_file('simple.nc') as f:
            check_simple(f)
            assert_equal(f.variables['app_var'][:], 42)

    except:  # noqa: E722
        os.chdir(cwd)
        shutil.rmtree(tmpdir)
        raise
    os.chdir(cwd)
    shutil.rmtree(tmpdir)
Example #37
    def test_pcov(self):
        xdata = np.array([0, 1, 2, 3, 4, 5])
        ydata = np.array([1, 1, 5, 7, 8, 12])
        sigma = np.array([1, 2, 1, 2, 1, 2])

        def f(x, a, b):
            return a * x + b

        for method in ['lm', 'trf', 'dogbox']:
            popt, pcov = curve_fit(f,
                                   xdata,
                                   ydata,
                                   p0=[2, 0],
                                   sigma=sigma,
                                   method=method)
            perr_scaled = np.sqrt(np.diag(pcov))
            assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)

            popt, pcov = curve_fit(f,
                                   xdata,
                                   ydata,
                                   p0=[2, 0],
                                   sigma=3 * sigma,
                                   method=method)
            perr_scaled = np.sqrt(np.diag(pcov))
            assert_allclose(perr_scaled, [0.20659803, 0.57204404], rtol=1e-3)

            popt, pcov = curve_fit(f,
                                   xdata,
                                   ydata,
                                   p0=[2, 0],
                                   sigma=sigma,
                                   absolute_sigma=True,
                                   method=method)
            perr = np.sqrt(np.diag(pcov))
            assert_allclose(perr, [0.30714756, 0.85045308], rtol=1e-3)

            popt, pcov = curve_fit(f,
                                   xdata,
                                   ydata,
                                   p0=[2, 0],
                                   sigma=3 * sigma,
                                   absolute_sigma=True,
                                   method=method)
            perr = np.sqrt(np.diag(pcov))
            assert_allclose(perr, [3 * 0.30714756, 3 * 0.85045308], rtol=1e-3)

        # infinite variances

        def f_flat(x, a, b):
            return a * x

        pcov_expected = np.array([np.inf] * 4).reshape(2, 2)

        with suppress_warnings() as sup:
            sup.filter(OptimizeWarning,
                       "Covariance of the parameters could not be estimated")
            popt, pcov = curve_fit(f_flat,
                                   xdata,
                                   ydata,
                                   p0=[2, 0],
                                   sigma=sigma)
            popt1, pcov1 = curve_fit(f, xdata[:2], ydata[:2], p0=[2, 0])

        assert_(pcov.shape == (2, 2))
        assert_array_equal(pcov, pcov_expected)

        assert_(pcov1.shape == (2, 2))
        assert_array_equal(pcov1, pcov_expected)
Example #38
def test_ellip_norm():
    def G01(h2, k2):
        return 4 * pi

    def G11(h2, k2):
        return 4 * pi * h2 * k2 / 3

    def G12(h2, k2):
        return 4 * pi * h2 * (k2 - h2) / 3

    def G13(h2, k2):
        return 4 * pi * k2 * (k2 - h2) / 3

    def G22(h2, k2):
        res = (2 * (h2**4 + k2**4) - 4 * h2 * k2 * (h2**2 + k2**2) +
               6 * h2**2 * k2**2 + sqrt(h2**2 + k2**2 - h2 * k2) *
               (-2 * (h2**3 + k2**3) + 3 * h2 * k2 * (h2 + k2)))
        return 16 * pi / 405 * res

    def G21(h2, k2):
        res = (2 * (h2**4 + k2**4) - 4 * h2 * k2 * (h2**2 + k2**2) +
               6 * h2**2 * k2**2 + sqrt(h2**2 + k2**2 - h2 * k2) *
               (2 * (h2**3 + k2**3) - 3 * h2 * k2 * (h2 + k2)))
        return 16 * pi / 405 * res

    def G23(h2, k2):
        return 4 * pi * h2**2 * k2 * (k2 - h2) / 15

    def G24(h2, k2):
        return 4 * pi * h2 * k2**2 * (k2 - h2) / 15

    def G25(h2, k2):
        return 4 * pi * h2 * k2 * (k2 - h2)**2 / 15

    def G32(h2, k2):
        res = (16 * (h2**4 + k2**4) - 36 * h2 * k2 * (h2**2 + k2**2) +
               46 * h2**2 * k2**2 + sqrt(4 * (h2**2 + k2**2) - 7 * h2 * k2) *
               (-8 * (h2**3 + k2**3) + 11 * h2 * k2 * (h2 + k2)))
        return 16 * pi / 13125 * k2 * h2 * res

    def G31(h2, k2):
        res = (16 * (h2**4 + k2**4) - 36 * h2 * k2 * (h2**2 + k2**2) +
               46 * h2**2 * k2**2 + sqrt(4 * (h2**2 + k2**2) - 7 * h2 * k2) *
               (8 * (h2**3 + k2**3) - 11 * h2 * k2 * (h2 + k2)))
        return 16 * pi / 13125 * h2 * k2 * res

    def G34(h2, k2):
        res = (6 * h2**4 + 16 * k2**4 - 12 * h2**3 * k2 - 28 * h2 * k2**3 +
               34 * h2**2 * k2**2 + sqrt(h2**2 + 4 * k2**2 - h2 * k2) *
               (-6 * h2**3 - 8 * k2**3 + 9 * h2**2 * k2 + 13 * h2 * k2**2))
        return 16 * pi / 13125 * h2 * (k2 - h2) * res

    def G33(h2, k2):
        res = (6 * h2**4 + 16 * k2**4 - 12 * h2**3 * k2 - 28 * h2 * k2**3 +
               34 * h2**2 * k2**2 + sqrt(h2**2 + 4 * k2**2 - h2 * k2) *
               (6 * h2**3 + 8 * k2**3 - 9 * h2**2 * k2 - 13 * h2 * k2**2))
        return 16 * pi / 13125 * h2 * (k2 - h2) * res

    def G36(h2, k2):
        res = (16 * h2**4 + 6 * k2**4 - 28 * h2**3 * k2 - 12 * h2 * k2**3 +
               34 * h2**2 * k2**2 + sqrt(4 * h2**2 + k2**2 - h2 * k2) *
               (-8 * h2**3 - 6 * k2**3 + 13 * h2**2 * k2 + 9 * h2 * k2**2))
        return 16 * pi / 13125 * k2 * (k2 - h2) * res

    def G35(h2, k2):
        res = (16 * h2**4 + 6 * k2**4 - 28 * h2**3 * k2 - 12 * h2 * k2**3 +
               34 * h2**2 * k2**2 + sqrt(4 * h2**2 + k2**2 - h2 * k2) *
               (8 * h2**3 + 6 * k2**3 - 13 * h2**2 * k2 - 9 * h2 * k2**2))
        return 16 * pi / 13125 * k2 * (k2 - h2) * res

    def G37(h2, k2):
        return 4 * pi * h2**2 * k2**2 * (k2 - h2)**2 / 105

    known_funcs = {
        (0, 1): G01,
        (1, 1): G11,
        (1, 2): G12,
        (1, 3): G13,
        (2, 1): G21,
        (2, 2): G22,
        (2, 3): G23,
        (2, 4): G24,
        (2, 5): G25,
        (3, 1): G31,
        (3, 2): G32,
        (3, 3): G33,
        (3, 4): G34,
        (3, 5): G35,
        (3, 6): G36,
        (3, 7): G37
    }

    def _ellip_norm(n, p, h2, k2):
        func = known_funcs[n, p]
        return func(h2, k2)

    _ellip_norm = np.vectorize(_ellip_norm)

    def ellip_normal_known(h2, k2, n, p):
        return _ellip_norm(n, p, h2, k2)

    # generate both large and small h2 < k2 pairs
    np.random.seed(1234)
    h2 = np.random.pareto(0.5, size=1)
    k2 = h2 * (1 + np.random.pareto(0.5, size=h2.size))

    points = []
    for n in range(4):
        for p in range(1, 2 * n + 2):
            points.append((h2, k2, np.full(h2.size, n), np.full(h2.size, p)))
    points = np.array(points)
    with suppress_warnings() as sup:
        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
        assert_func_equal(ellip_normal, ellip_normal_known, points, rtol=1e-12)
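
# Spot-check sketch of the closed forms above against scipy.special: the
# (n=0, p=1) normalization should equal 4*pi for any valid pair h2 < k2
# (the pair below is an arbitrary, illustrative choice).
import numpy as np
from scipy.special import ellip_normal
print(ellip_normal(0.5, 1.5, 0, 1), 4 * np.pi)  # expected to agree closely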
Exemple #39
0
    def test_nchypergeom_wallenius_naive(self):
        # test against a very simple implementation

        np.random.seed(2)
        shape = (2, 4, 3)
        max_m = 100
        m1 = np.random.randint(1, max_m, size=shape)
        m2 = np.random.randint(1, max_m, size=shape)
        N = m1 + m2
        n = randint.rvs(0, N, size=N.shape)
        xl = np.maximum(0, n - m2)
        xu = np.minimum(n, m1)
        x = randint.rvs(xl, xu, size=xl.shape)
        w = np.random.rand(*x.shape) * 2

        def support(N, m1, n, w):
            m2 = N - m1
            xl = np.maximum(0, n - m2)
            xu = np.minimum(n, m1)
            return xl, xu

        @np.vectorize
        def mean(N, m1, n, w):
            m2 = N - m1
            xl, xu = support(N, m1, n, w)

            def fun(u):
                return u / m1 + (1 - (n - u) / m2)**w - 1

            return root_scalar(fun, bracket=(xl, xu)).root

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning,
                       message="invalid value encountered in mean")
            assert_allclose(nchypergeom_wallenius.mean(N, m1, n, w),
                            mean(N, m1, n, w),
                            rtol=2e-2)

        @np.vectorize
        def variance(N, m1, n, w):
            m2 = N - m1
            u = mean(N, m1, n, w)
            a = u * (m1 - u)
            b = (n - u) * (u + m2 - n)
            return N * a * b / ((N - 1) * (m1 * b + m2 * a))

        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning,
                       message="invalid value encountered in mean")
            assert_allclose(nchypergeom_wallenius.stats(N,
                                                        m1,
                                                        n,
                                                        w,
                                                        moments='v'),
                            variance(N, m1, n, w),
                            rtol=5e-2)

        @np.vectorize
        def pmf(x, N, m1, n, w):
            m2 = N - m1
            xl, xu = support(N, m1, n, w)

            def integrand(t):
                D = w * (m1 - x) + (m2 - (n - x))
                res = (1 - t**(w / D))**x * (1 - t**(1 / D))**(n - x)
                return res

            def f(x):
                t1 = special_binom(m1, x)
                t2 = special_binom(m2, n - x)
                the_integral = quad(integrand,
                                    0,
                                    1,
                                    epsrel=1e-16,
                                    epsabs=1e-16)
                return t1 * t2 * the_integral[0]

            return f(x)

        pmf0 = pmf(x, N, m1, n, w)
        pmf1 = nchypergeom_wallenius.pmf(x, N, m1, n, w)

        atol, rtol = 1e-6, 1e-6
        i = np.abs(pmf1 - pmf0) < atol + rtol * np.abs(pmf0)
        assert (i.sum() > np.prod(shape) / 2)  # works at least half the time

        # for those that fail, discredit the naive implementation
        for N, m1, n, w in zip(N[~i], m1[~i], n[~i], w[~i]):
            # get the support
            m2 = N - m1
            xl, xu = support(N, m1, n, w)
            x = np.arange(xl, xu + 1)

            # calculate sum of pmf over the support
            # the naive implementation is very wrong in these cases
            assert pmf(x, N, m1, n, w).sum() < .5
            assert_allclose(nchypergeom_wallenius.pmf(x, N, m1, n, w).sum(), 1)
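
# Standalone sketch of the mean identity used by `mean` above: the Wallenius
# mean u solves u/m1 + (1 - (n - u)/m2)**w - 1 = 0. The scalar parameters
# below are illustrative, not taken from the test.
from scipy.optimize import root_scalar
from scipy.stats import nchypergeom_wallenius

N_, m1_, n_, w_ = 50, 20, 15, 1.5
m2_ = N_ - m1_
u = root_scalar(lambda t: t / m1_ + (1 - (n_ - t) / m2_) ** w_ - 1,
                bracket=(max(0, n_ - m2_), min(n_, m1_))).root
# Should agree with the distribution's mean to within a couple of percent:
print(u, nchypergeom_wallenius.mean(N_, m1_, n_, w_))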
Exemple #40
0
 def time_lpgen(self, meth, m, n):
     method, options = meth
     with suppress_warnings() as sup:
         sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll-conditioned")
         linprog(c=self.c, A_ub=self.A, b_ub=self.b,
                 method=method, options=options)
Exemple #41
0
def _test_factory(test, dtype=np.double):
    """Boost test"""
    with suppress_warnings() as sup:
        sup.filter(IntegrationWarning, "The occurrence of roundoff error is detected")
        with np.errstate(all='ignore'):
            test.check(dtype=dtype)
Exemple #42
0
 def test_cheb_even_high_attenuation(self):
     with suppress_warnings() as sup:
         sup.filter(UserWarning, "This window is not suitable")
         cheb_even = windows.chebwin(54, at=40)
     assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4)
Exemple #43
0
def binned_statistic_dd(sample,
                        values,
                        statistic='mean',
                        bins=10,
                        range=None,
                        expand_binnumbers=False,
                        binned_statistic_result=None):
    """
    Compute a multidimensional binned statistic for a set of data.

    This is a generalization of the histogramdd function.  A histogram divides
    the space into bins, and returns the count of the number of points in
    each bin.  This function allows the computation of the sum, mean, median,
    or other statistic of the values within each bin.

    Parameters
    ----------
    sample : array_like
        Data to histogram passed as a sequence of N arrays of length D, or
        as an (N,D) array.
    values : (N,) array_like or list of (N,) array_like
        The data on which the statistic will be computed.  This must be
        the same shape as `sample`, or a list of sequences - each with the
        same shape as `sample`.  If `values` is such a list, the statistic
        will be computed on each independently.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').
        The following statistics are available:

          * 'mean' : compute the mean of values for points within each bin.
            Empty bins will be represented by NaN.
          * 'median' : compute the median of values for points within each
            bin. Empty bins will be represented by NaN.
          * 'count' : compute the count of points within each bin.  This is
            identical to an unweighted histogram.  `values` array is not
            referenced.
          * 'sum' : compute the sum of values for points within each bin.
            This is identical to a weighted histogram.
          * 'std' : compute the standard deviation within each bin. This
            is implicitly calculated with ddof=0.
          * 'min' : compute the minimum of values for points within each bin.
            Empty bins will be represented by NaN.
          * 'max' : compute the maximum of values for points within each bin.
            Empty bins will be represented by NaN.
          * function : a user-defined function which takes a 1D array of
            values, and outputs a single numerical statistic. This function
            will be called on the values in each bin.  Empty bins will be
            represented by function([]), or NaN if this raises an error.

    bins : sequence or positive int, optional
        The bin specification must be in one of the following forms:

          * A sequence of arrays describing the bin edges along each dimension.
          * The number of bins for each dimension (nx, ny, ... = bins).
          * The number of bins for all dimensions (nx = ny = ... = bins).
    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    expand_binnumbers : bool, optional
        'False' (default): the returned `binnumber` is a shape (N,) array of
        linearized bin indices.
        'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
        ndarray, where each row gives the bin numbers in the corresponding
        dimension.
        See the `binnumber` returned value, and the `Examples` section of
        `binned_statistic_2d`.
    binned_statistic_result : BinnedStatisticddResult
        Result of a previous call to the function in order to reuse bin edges
        and bin numbers with new values and/or a different statistic.
        To reuse bin numbers, `expand_binnumbers` must have been set to False
        (the default).

        .. versionadded:: 0.17.0

    Returns
    -------
    statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
    bin_edges : list of ndarrays
        A list of D arrays describing the (nxi + 1) bin edges for each
        dimension.
    binnumber : (N,) array of ints or (D,N) ndarray of ints
        This assigns to each element of `sample` an integer that represents the
        bin in which this observation falls.  The representation depends on the
        `expand_binnumbers` argument.  See `Notes` for details.


    See Also
    --------
    numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d

    Notes
    -----
    Binedges:
    All but the last (righthand-most) bin is half-open in each dimension.  In
    other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
    ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``.  The
    last bin, however, is ``[3, 4]``, which *includes* 4.

    `binnumber`:
    This returned argument assigns to each element of `sample` an integer that
    represents the bin in which it belongs.  The representation depends on the
    `expand_binnumbers` argument. If 'False' (default): The returned
    `binnumber` is a shape (N,) array of linearized indices mapping each
    element of `sample` to its corresponding bin (using row-major ordering).
    If 'True': The returned `binnumber` is a shape (D,N) ndarray where
    each row indicates bin placements for each dimension respectively.  In each
    dimension, a binnumber of `i` means the corresponding value is between
    (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt
    >>> from mpl_toolkits.mplot3d import Axes3D

    Take an array of 600 (x, y) coordinates as an example.
    `binned_statistic_dd` can handle arrays of higher dimension `D`. But a plot
    of dimension `D+1` is required.

    >>> mu = np.array([0., 1.])
    >>> sigma = np.array([[1., -0.5],[-0.5, 1.5]])
    >>> multinormal = stats.multivariate_normal(mu, sigma)
    >>> data = multinormal.rvs(size=600, random_state=235412)
    >>> data.shape
    (600, 2)

    Create bins and count how many arrays fall in each bin:

    >>> N = 60
    >>> x = np.linspace(-3, 3, N)
    >>> y = np.linspace(-3, 4, N)
    >>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y],
    ...                                 statistic='count')
    >>> bincounts = ret.statistic

    Set the volume and the location of bars:

    >>> dx = x[1] - x[0]
    >>> dy = y[1] - y[0]
    >>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2)
    >>> z = 0

    >>> bincounts = bincounts.ravel()
    >>> x = x.ravel()
    >>> y = y.ravel()

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, projection='3d')
    >>> with np.errstate(divide='ignore'):   # silence random axes3d warning
    ...     ax.bar3d(x, y, z, dx, dy, bincounts)

    Reuse bin numbers and bin edges with new values:

    >>> ret2 = stats.binned_statistic_dd(data, -np.arange(600),
    ...                                  binned_statistic_result=ret,
    ...                                  statistic='mean')
    """
    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
    if not callable(statistic) and statistic not in known_stats:
        raise ValueError('invalid statistic %r' % (statistic, ))

    if not np.isfinite(values).all() or not np.isfinite(sample).all():
        raise ValueError('%r or %r contains non-finite values.' % (
            sample,
            values,
        ))

    # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
    # `Dlen` is the length of elements along each dimension.
    # This code is based on np.histogramdd
    try:
        # `sample` is an ND-array.
        Dlen, Ndim = sample.shape
    except (AttributeError, ValueError):
        # `sample` is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        Dlen, Ndim = sample.shape

    # Store initial shape of `values` to preserve it in the output
    values = np.asarray(values)
    input_shape = list(values.shape)
    # Make sure that `values` is 2D to iterate over rows
    values = np.atleast_2d(values)
    Vdim, Vlen = values.shape

    # Make sure `values` match `sample`
    if (statistic != 'count' and Vlen != Dlen):
        raise AttributeError('The number of `values` elements must match the '
                             'length of each `sample` dimension.')

    try:
        M = len(bins)
        if M != Ndim:
            raise AttributeError('The dimension of bins must be equal '
                                 'to the dimension of the sample x.')
    except TypeError:
        bins = Ndim * [bins]

    if binned_statistic_result is None:
        nbin, edges, dedges = _bin_edges(sample, bins, range)
        binnumbers = _bin_numbers(sample, nbin, edges, dedges)
    else:
        edges = binned_statistic_result.bin_edges
        nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)])
        # +1 for outlier bins
        dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)]
        binnumbers = binned_statistic_result.binnumber

    result = np.empty([Vdim, nbin.prod()], float)

    if statistic == 'mean':
        result.fill(np.nan)
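        # np.bincount with `values[vv]` as weights gives per-bin sums; dividing
        # by the unweighted counts yields per-bin means, and bins with no
        # points keep the NaN fill value.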
        flatcount = np.bincount(binnumbers, None)
        a = flatcount.nonzero()
        for vv in builtins.range(Vdim):
            flatsum = np.bincount(binnumbers, values[vv])
            result[vv, a] = flatsum[a] / flatcount[a]
    elif statistic == 'std':
        result.fill(0)
        for i in np.unique(binnumbers):
            for vv in builtins.range(Vdim):
                # NOTE: take std dev by bin, np.std() is 2-pass and stable
                result[vv, i] = np.std(values[vv, binnumbers == i])
    elif statistic == 'count':
        result.fill(0)
        flatcount = np.bincount(binnumbers, None)
        a = np.arange(len(flatcount))
        result[:, a] = flatcount[np.newaxis, :]
    elif statistic == 'sum':
        result.fill(0)
        for vv in builtins.range(Vdim):
            flatsum = np.bincount(binnumbers, values[vv])
            a = np.arange(len(flatsum))
            result[vv, a] = flatsum
    elif statistic == 'median':
        result.fill(np.nan)
        for i in np.unique(binnumbers):
            for vv in builtins.range(Vdim):
                result[vv, i] = np.median(values[vv, binnumbers == i])
    elif statistic == 'min':
        result.fill(np.nan)
        for i in np.unique(binnumbers):
            for vv in builtins.range(Vdim):
                result[vv, i] = np.min(values[vv, binnumbers == i])
    elif statistic == 'max':
        result.fill(np.nan)
        for i in np.unique(binnumbers):
            for vv in builtins.range(Vdim):
                result[vv, i] = np.max(values[vv, binnumbers == i])
    elif callable(statistic):
        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            try:
                null = statistic([])
            except Exception:
                null = np.nan
        result.fill(null)
        for i in np.unique(binnumbers):
            for vv in builtins.range(Vdim):
                result[vv, i] = statistic(values[vv, binnumbers == i])

    # Shape into a proper matrix
    result = result.reshape(np.append(Vdim, nbin))

    # Remove outliers (indices 0 and -1 for each bin-dimension).
    core = tuple([slice(None)] + Ndim * [slice(1, -1)])
    result = result[core]

    # Unravel binnumbers into an ndarray, each row the bins for each dimension
    if (expand_binnumbers and Ndim > 1):
        binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))

    if np.any(result.shape[1:] != nbin - 2):
        raise RuntimeError('Internal Shape Error')

    # Reshape to have output (`result`) match input (`values`) shape
    result = result.reshape(input_shape[:-1] + list(nbin - 2))

    return BinnedStatisticddResult(result, edges, binnumbers)
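
# Quick usage sketch of the public API this implementation backs
# (scipy.stats.binned_statistic_dd); the data below are illustrative.
import numpy as np
from scipy import stats

_rng = np.random.default_rng(0)
_pts = _rng.random((100, 2))            # 100 samples in D=2 dimensions
_vals = _pts[:, 0] + _pts[:, 1]         # statistic is computed over these
_res = stats.binned_statistic_dd(_pts, _vals, statistic='mean', bins=[4, 4])
print(_res.statistic.shape)             # (4, 4): one mean per bin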
Exemple #44
0
def test_cont_basic(distname, arg, sn, n_fit_samples):
    # this test skips slow distributions

    try:
        distfn = getattr(stats, distname)
    except TypeError:
        distfn = distname
        distname = 'rv_histogram_instance'

    rng = np.random.RandomState(765456)
    rvs = distfn.rvs(size=sn, *arg, random_state=rng)
    sm = rvs.mean()
    sv = rvs.var()
    m, v = distfn.stats(*arg)

    check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn,
                          distname + 'sample mean test')
    check_cdf_ppf(distfn, arg, distname)
    check_sf_isf(distfn, arg, distname)
    check_pdf(distfn, arg, distname)
    check_pdf_logpdf(distfn, arg, distname)
    check_pdf_logpdf_at_endpoints(distfn, arg, distname)
    check_cdf_logcdf(distfn, arg, distname)
    check_sf_logsf(distfn, arg, distname)
    check_ppf_broadcast(distfn, arg, distname)

    alpha = 0.01
    if distname == 'rv_histogram_instance':
        check_distribution_rvs(distfn.cdf, arg, alpha, rvs)
    elif distname != 'geninvgauss':
        # skip kstest for geninvgauss since cdf is too slow; see test for
        # rv generation in TestGenInvGauss in test_distributions.py
        check_distribution_rvs(distname, arg, alpha, rvs)

    locscale_defaults = (0, 1)
    meths = [
        distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf, distfn.logsf
    ]
    # make sure arguments are within support
    spec_x = {
        'weibull_max': -0.5,
        'levy_l': -0.5,
        'pareto': 1.5,
        'tukeylambda': 0.3,
        'rv_histogram_instance': 5.0
    }
    x = spec_x.get(distname, 0.5)
    if distname == 'invweibull':
        arg = (1, )
    elif distname == 'ksone':
        arg = (3, )

    check_named_args(distfn, x, arg, locscale_defaults, meths)
    check_random_state_property(distfn, arg)
    check_pickling(distfn, arg)
    check_freezing(distfn, arg)

    # Entropy
    if distname not in ['kstwobign', 'kstwo']:
        check_entropy(distfn, arg, distname)

    if distfn.numargs == 0:
        check_vecentropy(distfn, arg)

    if (distfn.__class__._entropy != stats.rv_continuous._entropy
            and distname != 'vonmises'):
        check_private_entropy(distfn, arg, stats.rv_continuous)

    with npt.suppress_warnings() as sup:
        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
        sup.filter(IntegrationWarning, "Extremely bad integrand")
        sup.filter(RuntimeWarning, "invalid value")
        check_entropy_vect_scale(distfn, arg)

    check_retrieving_support(distfn, arg)
    check_edge_support(distfn, arg)

    check_meth_dtype(distfn, arg, meths)
    check_ppf_dtype(distfn, arg)

    if distname not in fails_cmplx:
        check_cmplx_deriv(distfn, arg)

    if distname != 'truncnorm':
        check_ppf_private(distfn, arg, distname)

    for method in ["MLE", "MM"]:
        if distname not in skip_fit_test[method]:
            check_fit_args(distfn, arg, rvs[:n_fit_samples], method)

        if distname not in skip_fit_fix_test[method]:
            check_fit_args_fix(distfn, arg, rvs[:n_fit_samples], method)
Exemple #45
0
def _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
                          unpacker, nan_policy, axis, data_generator):
    # Tests the 1D and vectorized behavior of hypothesis tests against a
    # reference implementation (nan_policy_1d with np.ndenumerate)

    # Some hypothesis tests return a non-iterable that needs an `unpacker` to
    # extract the statistic and p-value. For those that don't:
    if not unpacker:

        def unpacker(res):
            return res

    if NumpyVersion(np.__version__) < '1.18.0':
        pytest.xfail("Generator `permutation` method doesn't support `axis`")
    rng = np.random.default_rng(0)

    # Generate multi-dimensional test data with all important combinations
    # of patterns of nans along `axis`
    n_repetitions = 3  # number of repetitions of each pattern
    data_gen_kwds = {
        'n_samples': n_samples,
        'n_repetitions': n_repetitions,
        'axis': axis,
        'rng': rng,
        'paired': paired
    }
    if data_generator == 'mixed':
        inherent_size = 6  # number of distinct types of patterns
        data = _mixed_data_generator(**data_gen_kwds)
    elif data_generator == 'all_nans':
        inherent_size = 2  # hard-coded in _homogeneous_data_generator
        data_gen_kwds['all_nans'] = True
        data = _homogeneous_data_generator(**data_gen_kwds)
    elif data_generator == 'all_finite':
        inherent_size = 2  # hard-coded in _homogeneous_data_generator
        data_gen_kwds['all_nans'] = False
        data = _homogeneous_data_generator(**data_gen_kwds)

    output_shape = [n_repetitions] + [inherent_size] * n_samples

    # To generate reference behavior to compare against, loop over the axis-
    # slices in data. Make indexing easier by moving `axis` to the end and
    # broadcasting all samples to the same shape.
    data_b = [np.moveaxis(sample, axis, -1) for sample in data]
    data_b = [
        np.broadcast_to(sample, output_shape + [sample.shape[-1]])
        for sample in data_b
    ]
    statistics = np.zeros(output_shape)
    pvalues = np.zeros(output_shape)

    for i, _ in np.ndenumerate(statistics):
        data1d = [sample[i] for sample in data_b]
        with np.errstate(divide='ignore', invalid='ignore'):
            try:
                res1d = nan_policy_1d(hypotest,
                                      data1d,
                                      unpacker,
                                      *args,
                                      n_outputs=n_outputs,
                                      nan_policy=nan_policy,
                                      paired=paired,
                                      _no_deco=True,
                                      **kwds)

                # Eventually we'll check the results of a single, vectorized
                # call of `hypotest` against the arrays `statistics` and
                # `pvalues` populated using the reference `nan_policy_1d`.
                # But while we're at it, check the results of a 1D call to
                # `hypotest` against the reference `nan_policy_1d`.
                res1db = unpacker(
                    hypotest(*data1d, *args, nan_policy=nan_policy, **kwds))
                assert_equal(res1db[0], res1d[0])
                if len(res1db) == 2:
                    assert_equal(res1db[1], res1d[1])

            # When there is not enough data in 1D samples, many existing
            # hypothesis tests raise errors instead of returning nans.
            # For vectorized calls, we put nans in the corresponding elements
            # of the output.
            except (RuntimeWarning, UserWarning, ValueError,
                    ZeroDivisionError) as e:

                # whatever it is, make sure same error is raised by both
                # `nan_policy_1d` and `hypotest`
                with pytest.raises(type(e), match=re.escape(str(e))):
                    nan_policy_1d(hypotest,
                                  data1d,
                                  unpacker,
                                  *args,
                                  n_outputs=n_outputs,
                                  nan_policy=nan_policy,
                                  paired=paired,
                                  _no_deco=True,
                                  **kwds)
                with pytest.raises(type(e), match=re.escape(str(e))):
                    hypotest(*data1d, *args, nan_policy=nan_policy, **kwds)

                if any([
                        str(e).startswith(message)
                        for message in too_small_messages
                ]):
                    res1d = np.full(n_outputs, np.nan)
                elif any([
                        str(e).startswith(message)
                        for message in inaccuracy_messages
                ]):
                    with suppress_warnings() as sup:
                        sup.filter(RuntimeWarning)
                        sup.filter(UserWarning)
                        res1d = nan_policy_1d(hypotest,
                                              data1d,
                                              unpacker,
                                              *args,
                                              n_outputs=n_outputs,
                                              nan_policy=nan_policy,
                                              paired=paired,
                                              _no_deco=True,
                                              **kwds)
                else:
                    raise e
        statistics[i] = res1d[0]
        if len(res1d) == 2:
            pvalues[i] = res1d[1]

    # Perform a vectorized call to the hypothesis test.
    # If `nan_policy == 'raise'`, check that it raises the appropriate error.
    # If not, compare the output against `statistics` and `pvalues`.
    if nan_policy == 'raise' and data_generator != "all_finite":
        message = 'The input contains nan values'
        with pytest.raises(ValueError, match=message):
            hypotest(*data, axis=axis, nan_policy=nan_policy, *args, **kwds)

    else:
        with suppress_warnings() as sup, \
             np.errstate(divide='ignore', invalid='ignore'):
            sup.filter(RuntimeWarning, "Precision loss occurred in moment")
            sup.filter(UserWarning, "Sample size too small for normal "
                       "approximation.")
            res = unpacker(
                hypotest(*data,
                         axis=axis,
                         nan_policy=nan_policy,
                         *args,
                         **kwds))

        if hypotest.__name__ in {"gmean"}:
            assert_allclose(res[0], statistics, rtol=2e-16)
        else:
            assert_equal(res[0], statistics)

        assert_equal(res[0].dtype, statistics.dtype)
        if len(res) == 2:
            assert_equal(res[1], pvalues)
            assert_equal(res[1].dtype, pvalues.dtype)
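
# Toy sketch of the reference pattern used above: move the reduction axis to
# the end, then visit every remaining index with np.ndenumerate and apply the
# 1D computation slice by slice (np.mean stands in for the hypothesis test).
import numpy as np

_a = np.arange(24.).reshape(2, 3, 4)
_b = np.moveaxis(_a, 1, -1)              # axis=1 becomes the trailing axis
_out = np.zeros(_b.shape[:-1])
for _idx, _ in np.ndenumerate(_out):
    _out[_idx] = np.mean(_b[_idx])       # 1D slice along the original axis=1
assert np.allclose(_out, _a.mean(axis=1))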
Exemple #46
0
 def test_cheb_even(self):
     with suppress_warnings() as sup:
         sup.filter(UserWarning, "This window is not suitable")
         w = windows.get_window(('chebwin', 40), 54, fftbins=False)
     assert_array_almost_equal(w, cheb_even_true, decimal=4)
Exemple #47
0
    def test_L1(self):
        # Lampinen ([5]) test problem 1

        def f(x):
            x = np.hstack(([0], x))  # 1-indexed to match reference
            fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:])
            return fun

        A = np.zeros((10, 14))  # 1-indexed to match reference
        A[1, [1, 2, 10, 11]] = 2, 2, 1, 1
        A[2, [1, 10]] = -8, 1
        A[3, [4, 5, 10]] = -2, -1, 1
        A[4, [1, 3, 10, 11]] = 2, 2, 1, 1
        A[5, [2, 11]] = -8, 1
        A[6, [6, 7, 11]] = -2, -1, 1
        A[7, [2, 3, 11, 12]] = 2, 2, 1, 1
        A[8, [3, 12]] = -8, 1
        A[9, [8, 9, 12]] = -2, -1, 1
        A = A[1:, 1:]

        b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0])

        L = LinearConstraint(A, -np.inf, b)

        bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)]

        # using a lower popsize to speed the test up
        res = differential_evolution(f, bounds, strategy='best1bin', seed=1234,
                                     constraints=(L), popsize=2)

        x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1)
        f_opt = -15

        assert_allclose(f(x_opt), f_opt)
        assert res.success
        assert_allclose(res.x, x_opt, atol=5e-4)
        assert_allclose(res.fun, f_opt, atol=5e-3)
        assert_(np.all(A @ res.x <= b))
        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
        assert_(np.all(res.x <= np.array(bounds)[:, 1]))

        # now repeat the same solve, using the same overall constraints,
        # but specify half the constraints in terms of LinearConstraint,
        # and the other half by NonlinearConstraint
        def c1(x):
            x = np.hstack(([0], x))
            return [2*x[2] + 2*x[3] + x[11] + x[12],
                    -8*x[3] + x[12]]

        def c2(x):
            x = np.hstack(([0], x))
            return -2*x[8] - x[9] + x[12]

        L = LinearConstraint(A[:5, :], -np.inf, b[:5])
        L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6])
        N = NonlinearConstraint(c1, -np.inf, b[6:8])
        N2 = NonlinearConstraint(c2, -np.inf, b[8:9])
        constraints = (L, N, L2, N2)

        with suppress_warnings() as sup:
            sup.filter(UserWarning)
            res = differential_evolution(f, bounds, strategy='rand1bin',
                                         seed=1234, constraints=constraints,
                                         popsize=2)

        assert_allclose(res.x, x_opt, atol=5e-4)
        assert_allclose(res.fun, f_opt, atol=5e-3)
        assert_(np.all(A @ res.x <= b))
        assert_(np.all(res.x >= np.array(bounds)[:, 0]))
        assert_(np.all(res.x <= np.array(bounds)[:, 1]))
Exemple #48
0
 def lti_nowarn(self, *args):
     with suppress_warnings() as sup:
         sup.filter(BadCoefficients)
         system = lti(*args)
     return system
Exemple #49
0
 def test_old_radius_api(self):
     sv_unit = SphericalVoronoi(self.points, radius=1)
     with suppress_warnings() as sup:
         sup.filter(DeprecationWarning, "`radius` is `None`")
         sv = SphericalVoronoi(self.points, None)
         assert_array_almost_equal(sv_unit.vertices, sv.vertices)
Exemple #50
0
    def test_individual_constraint_objects(self):
        fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 + (x[2] - 0.75)**2
        x0 = [2, 0, 1]

        cone = []  # with equality constraints (can't use cobyla)
        coni = []  # only inequality constraints (can use cobyla)
        methods = ["slsqp", "cobyla", "trust-constr"]

        # nonstandard area_data types for constraint equality bounds
        cone.append(NonlinearConstraint(lambda x: x[0] - x[1], 1, 1))
        cone.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], [1.21]))
        cone.append(
            NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.array([1.21])))

        # multiple equalities
        cone.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]], 1.21,
                                1.21))  # two same equalities
        cone.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.21, 1.4],
                                [1.21, 1.4]))  # two different equalities
        cone.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.21, 1.21],
                                1.21))  # equality specified two ways
        cone.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.21, -np.inf],
                                [1.21, np.inf]))  # equality + unbounded

        # nonstandard area_data types for constraint inequality bounds
        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], 1.21, np.inf))
        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], [1.21], np.inf))
        coni.append(
            NonlinearConstraint(lambda x: x[0] - x[1], 1.21,
                                np.array([np.inf])))
        coni.append(NonlinearConstraint(lambda x: x[0] - x[1], -np.inf, -3))
        coni.append(
            NonlinearConstraint(lambda x: x[0] - x[1], np.array(-np.inf), -3))

        # multiple inequalities/equalities
        coni.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]], 1.21,
                                np.inf))  # two same inequalities
        cone.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.21, -np.inf],
                                [1.21, 1.4]))  # mixed equality/inequality
        coni.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [1.1, .8],
                                [1.2, 1.4]))  # bounded above and below
        coni.append(
            NonlinearConstraint(lambda x: [x[0] - x[1], x[1] - x[2]],
                                [-1.2, -1.4],
                                [-1.1, -.8]))  # - bounded above and below

        # quick check of LinearConstraint class (very little new code to test)
        cone.append(LinearConstraint([1, -1, 0], 1.21, 1.21))
        cone.append(LinearConstraint([[1, -1, 0], [0, 1, -1]], 1.21, 1.21))
        cone.append(
            LinearConstraint([[1, -1, 0], [0, 1, -1]], [1.21, -np.inf],
                             [1.21, 1.4]))

        for con in coni:
            funs = {}
            for method in methods:
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
            assert_allclose(funs['cobyla'], funs['trust-constr'], rtol=1e-3)

        for con in cone:
            funs = {}
            for method in methods[::2]:  # skip cobyla
                with suppress_warnings() as sup:
                    sup.filter(UserWarning)
                    result = minimize(fun, x0, method=method, constraints=con)
                    funs[method] = result.fun
            assert_allclose(funs['slsqp'], funs['trust-constr'], rtol=1e-3)
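
# Side sketch of the equality encoding used in `cone` above: setting lb == ub
# in a NonlinearConstraint expresses an equality, e.g. x0 - x1 == 1
# (objective and starting point below are illustrative).
from scipy.optimize import minimize, NonlinearConstraint

_eq = NonlinearConstraint(lambda x: x[0] - x[1], 1, 1)
_res = minimize(lambda x: x[0]**2 + x[1]**2, [1.0, 0.0],
                method='slsqp', constraints=_eq)
print(_res.x)  # expected near (0.5, -0.5)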
Exemple #51
0
def test_unindent():
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        assert_equal(doccer.unindent_string(param_doc1), param_doc1)
        assert_equal(doccer.unindent_string(param_doc2), param_doc2)
        assert_equal(doccer.unindent_string(param_doc3), param_doc1)
Exemple #52
0
def test_integration():
    rtol = 1e-3
    atol = 1e-6
    y0 = [1/3, 2/9]

    for vectorized, method, t_span, jac in product(
            [False, True],
            ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'],
            [[5, 9], [5, 1]],
            [None, jac_rational, jac_rational_sparse]):

        if vectorized:
            fun = fun_rational_vectorized
        else:
            fun = fun_rational

        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       "The following arguments have no effect for a chosen "
                       "solver: `jac`")
            res = solve_ivp(fun, t_span, y0, rtol=rtol,
                            atol=atol, method=method, dense_output=True,
                            jac=jac, vectorized=vectorized)
        assert_equal(res.t[0], t_span[0])
        assert_(res.t_events is None)
        assert_(res.y_events is None)
        assert_(res.success)
        assert_equal(res.status, 0)

        if method == 'DOP853':
            # DOP853 spends more function evaluations because it doesn't
            # have enough time to develop a big enough step size.
            assert_(res.nfev < 50)
        else:
            assert_(res.nfev < 40)

        if method in ['RK23', 'RK45', 'DOP853', 'LSODA']:
            assert_equal(res.njev, 0)
            assert_equal(res.nlu, 0)
        else:
            assert_(0 < res.njev < 3)
            assert_(0 < res.nlu < 10)

        y_true = sol_rational(res.t)
        e = compute_error(res.y, y_true, rtol, atol)
        assert_(np.all(e < 5))

        tc = np.linspace(*t_span)
        yc_true = sol_rational(tc)
        yc = res.sol(tc)

        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 5))

        tc = (t_span[0] + t_span[-1]) / 2
        yc_true = sol_rational(tc)
        yc = res.sol(tc)

        e = compute_error(yc, yc_true, rtol, atol)
        assert_(np.all(e < 5))

        # For some reason, LSODA doesn't pass the polynomial through the
        # previous points exactly after the order change. It might be a bug
        # in the LSODA implementation, or maybe we are missing something.
        if method != 'LSODA':
            assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)
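
# Minimal dense-output sketch of the solve_ivp pattern tested above, using
# exponential decay as an illustrative problem:
import numpy as np
from scipy.integrate import solve_ivp

_sol = solve_ivp(lambda t, y: -y, (0, 2), [1.0], dense_output=True, rtol=1e-6)
_t = np.linspace(0, 2, 5)
assert np.allclose(_sol.sol(_t)[0], np.exp(-_t), atol=1e-4)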
Exemple #53
0
 def test_inf(self, k, n, p):
     with suppress_warnings() as sup:
         sup.filter(DeprecationWarning)
         val = sc.bdtri(k, n, p)
     assert np.isnan(val)
Exemple #54
0
# from math import *
import warnings
import numpy as np
from numpy.testing import suppress_warnings
from scipy.integrate import solve_ivp
from numba import jit, NumbaDeprecationWarning, NumbaPendingDeprecationWarning, NumbaWarning
import matplotlib as mpl
from matplotlib import colors
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import nolds as nd
import seaborn as sns

# If we want to ignore all warnings:
warnings.filterwarnings("ignore")
# Note: suppress_warnings is scoped to its `with` block, so the filter below
# only applies to code executed inside the block.
with suppress_warnings() as sup:
    sup.filter(np.ComplexWarning)

warnings.simplefilter('ignore', category=NumbaDeprecationWarning)
warnings.simplefilter('ignore', category=NumbaPendingDeprecationWarning)
warnings.simplefilter('ignore', category=NumbaWarning)

# constants
M = 0.0005  # kg
G = 9.81  # m s^-2
μ = 0.1  # s^-1
L = 0.1  # m
Q = 10**(-8)  # C
E0 = 10**6  # V m^-1
ω = 11  # rad s^-1