Example 1
def mpmath_check(min_ver):
    if mpmath is None:
        return dec.skipif(True, "mpmath library is not present")

    def try_int(v):
        try: return int(v)
        except ValueError: return v

    def get_version(v):
        return map(try_int, re.split('[^0-9]', v))

    return dec.skipif(get_version(min_ver) > get_version(mpmath.__version__),
                      "mpmath %s required" % min_ver)
Example 2
class TestScalarPEP3118(object):
    skip_if_no_buffer_interface = dec.skipif(sys.version_info.major < 3,
                "scalars do not implement buffer interface in Python 2")

    @skip_if_no_buffer_interface
    def test_scalar_match_array(self):
        for scalar, _ in scalars_and_codes:
            x = scalar()
            a = np.array([], dtype=np.dtype(scalar))
            mv_x = memoryview(x)
            mv_a = memoryview(a)
            assert_equal(mv_x.format, mv_a.format)

    @skip_if_no_buffer_interface
    def test_scalar_dim(self):
        for scalar, _ in scalars_and_codes:
            x = scalar()
            mv_x = memoryview(x)
            assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
            assert_equal(mv_x.ndim, 0)
            assert_equal(mv_x.shape, ())
            assert_equal(mv_x.strides, ())
            assert_equal(mv_x.suboffsets, ())

    @skip_if_no_buffer_interface
    def test_scalar_known_code(self):
        for scalar, code in scalars_and_codes:
            x = scalar()
            mv_x = memoryview(x)
            assert_equal(mv_x.format, code)

    @skip_if_no_buffer_interface
    def test_void_scalar_structured_data(self):
        dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))])
        x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
        assert_(isinstance(x, np.void))
        mv_x = memoryview(x)
        expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize
        expected_size += 2 * np.dtype((np.float64, 1)).itemsize
        assert_equal(mv_x.itemsize, expected_size)
        assert_equal(mv_x.ndim, 0)
        assert_equal(mv_x.shape, ())
        assert_equal(mv_x.strides, ())
        assert_equal(mv_x.suboffsets, ())

        # check scalar format string against ndarray format string
        a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
        assert_(isinstance(a, np.ndarray))
        mv_a = memoryview(a)
        assert_equal(mv_x.itemsize, mv_a.itemsize)
        assert_equal(mv_x.format, mv_a.format)
Example 3
def nonfunctional_tooslow(func):
    return dec.skipif(True, "    Test not yet functional (too slow), needs more work.")(func)
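Example 4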
import sys
import os
import stat
import tempfile

from distutils.dir_util import remove_tree

from numpy.testing import TestCase, assert_, dec
from numpy.testing.noseclasses import KnownFailureTest

from scipy.weave import catalog
from weave_test_utils import clear_temp_catalog, restore_temp_catalog, \
        empty_temp_dir, cleanup_temp_dir

skip_on_windows = dec.skipif(sys.platform == 'win32',
                             "Test works only on posix")


def samefile(a, b):
    try:
        return os.path.samefile(a, b)
    except AttributeError:
        return os.path.realpath(a) == os.path.realpath(b)


class TestIntermediateDir(TestCase):
    """
    Tests for the intermediate dir (the store of .cpp and .o files during
    builds). These check whether the intermediate dir is safe to use; if it
    is not, a new one should be created.
    """
Example 5
    dire.bench()

If you have doctests enabled by default in nose (with a noserc file or
environment variable), and you have a numpy version <= 1.6.1, this will also run
the doctests; let's hope they pass.
"""
import numpy as np
from numpy.random import randn

from ..vec_val_sum import vec_val_vect

from numpy.testing import measure, dec

try:
    np.einsum
except AttributeError:
    with_einsum = dec.skipif(True, "Need einsum for benchmark")
else:
    with_einsum = lambda f : f

@with_einsum
def bench_vec_val_vect():
    # nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench'
    repeat = 100
    shape = (100, 100)
    evecs, evals = randn(*(shape + (3, 3))), randn(*(shape + (3,)))
    etime = measure("np.einsum('...ij,...j,...kj->...ik', evecs, evals, evecs)",
                    repeat)
    vtime = measure("vec_val_vect(evecs, evals)", repeat)
    print("einsum %4.2f; vec_val_vect %4.2f" % (etime, vtime))
Example 6
import numpy as np

from scipy.special import genlaguerre, gamma

from ...data import get_gtab_taiwan_dsi
from ..shore import ShoreModel
from ...sims.voxel import MultiTensor

from numpy.testing import (assert_almost_equal, assert_equal, run_module_suite,
                           dec)

from ...utils.optpkg import optional_package
cvxopt, have_cvxopt, _ = optional_package("cvxopt")

needs_cvxopt = dec.skipif(not have_cvxopt)


# Object to hold module global data
class _C(object):
    pass


data = _C()


def setup():
    data.gtab = get_gtab_taiwan_dsi()
    data.mevals = np.array(([0.0015, 0.0003, 0.0003], [0.0015, 0.0003,
                                                       0.0003]))
    data.angl = [(0, 0), (60, 0)]
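
needs_cvxopt above is built without a reason string, which dec.skipif allows but which makes the skip report less informative; it is applied like any other decorator. A tiny usage sketch (the test name and body are invented):

@needs_cvxopt
def test_shore_constrained_fit():
    # hypothetical body: a constrained SHORE fit that requires cvxopt
    pass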
Example 7
import numpy as np
import itertools

from ..externals.six import BytesIO

from numpy.testing import assert_array_equal, assert_array_almost_equal, dec

# Decorator to skip tests requiring save / load if scipy not available for mat
# files
try:
    import scipy
except ImportError:
    have_scipy = False
else:
    have_scipy = True
scipy_skip = dec.skipif(not have_scipy, 'scipy not available')

from ..spm99analyze import (Spm99AnalyzeHeader, Spm99AnalyzeImage,
                            HeaderTypeError)
from ..casting import type_info, shared_range
from ..volumeutils import apply_read_scaling, _dt_min_max
from ..spatialimages import supported_np_types, HeaderDataError

from nose.tools import assert_true, assert_false, assert_equal, assert_raises

from ..testing import assert_allclose_safely

from . import test_analyze
from .test_helpers import (bytesio_round_trip, bytesio_filemap, bz2_mio_error)

FLOAT_TYPES = np.sctypes['float']
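
scipy_skip is defined in this excerpt but applied to individual tests further down the file, outside the quoted range. A plausible application (the test is invented):

@scipy_skip
def test_mat_round_trip():
    # hypothetical: saves and reloads through scipy.io matlab support,
    # so it is skipped automatically when scipy is absent
    pass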
Example 8
def nonfunctional_tooslow(func):
    return dec.skipif(
        True, "    Test not yet functional (too slow), needs more work.")(func)
Example 9
def if_datasource(ds, msg):
    try:
        ds.get_filename()
    except DataError:
        return dec.skipif(True, msg)
    return lambda f: f
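
Because if_datasource returns either a real skip decorator or a pass-through lambda, it can be applied unconditionally at import time. A short usage sketch (the example_data datasource and the test are assumptions):

@if_datasource(example_data, 'cannot resolve example data files')
def test_example_data_present():
    # hypothetical: only runs when get_filename() succeeded at decoration time
    assert example_data.get_filename() is not None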
Example 10
def skip_func(func):
    return dec.skipif(True, msg)(func)
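
This fragment is the inner closure of a factory; msg comes from the enclosing scope. A plausible reconstruction of that enclosing function, assuming it skips only when some feature is missing (all names here are invented):

def skipif_missing(have_feature, msg):
    # Pass tests through untouched when the feature is present; otherwise
    # return the unconditional-skip closure shown in the fragment above.
    if have_feature:
        return lambda func: func

    def skip_func(func):
        return dec.skipif(True, msg)(func)
    return skip_func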
Example 11
from scipy.sparse import csc_matrix, spdiags, SparseEfficiencyWarning
from scipy.sparse.linalg import linsolve

warnings.simplefilter('ignore',SparseEfficiencyWarning)

import numpy as np
try:
    import scipy.sparse.linalg.dsolve.umfpack as um
except (ImportError, AttributeError):
    _have_umfpack = False
else:
    _have_umfpack = um.umfpack._um is not None

# Allow disabling of nose tests if umfpack not present
# See end of file for application
_umfpack_skip = dec.skipif(not _have_umfpack,
                           'UMFPACK appears not to be compiled')

class TestSolvers(TestCase):
    """Tests inverting a sparse linear system"""

    def test_solve_complex_without_umfpack(self):
        """Solve: single precision complex"""
        linsolve.use_solver( useUmfpack = False )
        a = self.a.astype('F')
        b = self.b
        x = linsolve.spsolve(a, b)
        #print x
        #print "Error: ", a*x-b
        assert_array_almost_equal(a*x, b, decimal=4)
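
The comment above points to the end of the file for where _umfpack_skip is applied, and the excerpt stops before that point. A plausible sketch of such an application, reusing the excerpt's imports (the class and test are invented):

class TestSolversUmfpack(TestCase):
    """Hypothetical companion tests that require the UMFPACK wrappers."""

    @_umfpack_skip
    def test_solve_with_umfpack(self):
        linsolve.use_solver(useUmfpack=True)
        a = csc_matrix(spdiags([[1., 2., 3., 4., 5.]], [0], 5, 5))
        b = np.array([1., 2., 3., 4., 5.])
        x = linsolve.spsolve(a, b)
        assert_array_almost_equal(a * x, b)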

Example 12
    pass

import numpy as np
from numpy.testing import assert_, assert_raises, dec

import scipy.io
from scipy._lib._tmpdirs import tempdir
import scipy.sparse

# Bit of a hack to keep the test runner from exploding in Python 2.7.
# FileNotFoundError was added in Python 3.3.
if sys.version_info < (3, 3):
    FileNotFoundError = IOError

skip_dec = dec.skipif(
    sys.version_info < (3, 6),
    'Passing path-like objects to IO functions requires Python >= 3.6')


class TestPaths(object):
    data = np.arange(5)

    @skip_dec
    def test_savemat(self):
        with tempdir() as temp_dir:
            path = Path(temp_dir) / 'data.mat'
            scipy.io.savemat(path, {'data': self.data})
            assert_(path.is_file())

    @skip_dec
    def test_loadmat(self):
Example 13
""" Testing doctest markup tests
"""

import sys
from ..py3builder import doctest_markup, byter

from numpy.testing import (assert_array_almost_equal, assert_array_equal, dec)

from nose.tools import assert_true, assert_equal, assert_raises

is_2 = sys.version_info[0] < 3
skip42 = dec.skipif(is_2)

# Tell 23dt processing to pass the rest of this file unchanged.  We don't want
# the processor to mess up the example string
#23dt skip rest

IN_TXT = """

Anonymous lines, also blanks

As all that is empty, use entropy, and endure

# Comment, unchanged

#23dt comment not processed without doctest marker
>>> #23dthere: no whitespace; comment not recognized even as error
>>>#23dt nor without preceding whitespace
>>> #23dt not correct syntax creates error
>>> #23dt novar: 'undefined variable creates error'
>>> #23dt here: 'OK'
Example 14
from __future__ import division, print_function, absolute_import

from numpy.testing import dec

try:
    eval('a @ b')
    has_matmul = True
except:
    has_matmul = False

if has_matmul:
    from jsonctmctree.tests.check_bad_syntax import *

from jsonctmctree.tests.check_good_syntax import *

@dec.skipif(True, 'matmul is unavailable')
def test_matmul_stub():
    pass
Example 15
""" Testing doctest markup tests
"""

import sys
from ..py3builder import doctest_markup, byter

from numpy.testing import (assert_array_almost_equal, assert_array_equal, dec)

from nose.tools import assert_true, assert_equal, assert_raises

is_2 = sys.version_info[0] < 3
skip42 = dec.skipif(is_2)

# Tell 23dt processing to pass the rest of this file unchanged.  We don't want
# the processor to mess up the example string
#23dt skip rest

IN_TXT = """

Anonymous lines, also blanks

As all that is empty, use entropy, and endure

# Comment, unchanged

#23dt comment not processed without doctest marker
>>> #23dthere: no whitespace; comment not recognized even as error
>>>#23dt nor without preceding whitespace
>>> #23dt not correct syntax creates error
>>> #23dt novar: 'undefined variable creates error'
>>> #23dt here: 'OK'
Example 16
def check_version(module, min_ver):
    if type(module) == MissingModule:
        return dec.skipif(True, "{} is not installed".format(module.name))
    return dec.skipif(LooseVersion(module.__version__) < LooseVersion(min_ver),
                      "{} version >= {} required".format(module.__name__, min_ver))
Example 17
import numpy as np

from numpy.testing import assert_, assert_equal, \
        dec, decorate_methods, TestCase, run_module_suite

from scipy import misc

try:
    import PIL.Image
except ImportError:
    _have_PIL = False
else:
    _have_PIL = True

# Function / method decorator for skipping PIL tests on import failure
_pilskip = dec.skipif(not _have_PIL, 'Need to import PIL for this test')

datapath = os.path.dirname(__file__)


class TestPILUtil(TestCase):
    def test_imresize(self):
        im = np.random.random((10, 20))
        for T in np.sctypes['float'] + [float]:
            # 1.1 rounds to below 1.1 for float16, 1.101 works
            im1 = misc.imresize(im, T(1.101))
            assert_equal(im1.shape, (11, 22))

    def test_imresize2(self):
        im = np.random.random((20, 30))
        im2 = misc.imresize(im, (30, 40), interp='bicubic')
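Example 18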
    import dipy.reconst as dire
    dire.bench()

If you have doctests enabled by default in nose (with a noserc file or
environment variable), and you have a numpy version <= 1.6.1, this will also
run the doctests; let's hope they pass.
"""
import numpy as np
from numpy.random import randn

from dipy.reconst.vec_val_sum import vec_val_vect

from numpy.testing import measure, dec

try:
    np.einsum
except AttributeError:
    with_einsum = dec.skipif(True, "Need einsum for benchmark")
else:
    def with_einsum(f): return f


@with_einsum
def bench_vec_val_vect():
    # nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench'
    repeat = 100
    etime = measure("np.einsum('...ij,...j,...kj->...ik', evecs, evals, evecs)",
                    repeat)
    vtime = measure("vec_val_vect(evecs, evals)", repeat)
    print("einsum %4.2f; vec_val_vect %4.2f" % (etime, vtime))
Example 19
from __future__ import division, print_function, absolute_import

import sys

import numpy as np
from numpy.testing import assert_, assert_allclose, dec
import scipy.special.orthogonal as orth

from scipy.lib._version import NumpyVersion
from scipy.special._testutils import FuncData


# Early Numpy versions have bugs in ufunc keyword argument parsing
numpy_version_requirement = dec.skipif(
    NumpyVersion(np.__version__) < '1.6.0' and sys.version_info[0] >= 3,
    "Bug in Numpy < 1.6 on Python 3")


def test_eval_chebyt():
    n = np.arange(0, 10000, 7)
    x = 2*np.random.rand() - 1
    v1 = np.cos(n*np.arccos(x))
    v2 = orth.eval_chebyt(n, x)
    assert_(np.allclose(v1, v2, rtol=1e-15))


def test_eval_genlaguerre_restriction():
    # check it returns nan for alpha <= -1
    assert_(np.isnan(orth.eval_genlaguerre(0, -1, 0)))
    assert_(np.isnan(orth.eval_genlaguerre(0.1, -1, 0)))
Example 20
from statsmodels.compat.python import PY3
import os
from statsmodels.datasets import get_rdataset, webuse, check_internet
from numpy.testing import assert_, assert_array_equal, dec

cur_dir = os.path.dirname(os.path.abspath(__file__))

@dec.skipif(PY3, 'Not testable on Python 3.x')
def test_get_rdataset():
    # smoke test
    if not PY3:
        #NOTE: there's no way to test both since the cached files were
        #created with Python 2.x, they're strings, but Python 3 expects
        #bytes and the index file path is hard-coded so both can't live
        #side by side
        duncan = get_rdataset("Duncan", "car", cache=cur_dir)
        assert_(duncan.from_cache)

#internet_available = check_internet()
#@dec.skipif(not internet_available)
def t_est_webuse():
    # test copied and adjusted from iolib/tests/test_foreign
    from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2
    #base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
    base_gh = "http://www.statsmodels.org/devel/_static/"
    res1 = webuse('macrodata', baseurl=base_gh, as_df=False)
    assert_array_equal(res1 == res2, True)

#@dec.skipif(not internet_available)
def t_est_webuse_pandas():
    # test copied and adjusted from iolib/tests/test_foreign
Example 21
DATA_SDIR = 'fsaverage'

have_freesurfer = False
if 'SUBJECTS_DIR' in os.environ:
    # May have Freesurfer installed with data
    data_path = pjoin(os.environ["SUBJECTS_DIR"], DATA_SDIR)
    have_freesurfer = isdir(data_path)
else:
    # May have nibabel test data submodule checked out
    nib_data = get_nibabel_data()
    if nib_data != '':
        data_path = pjoin(nib_data, 'nitest-freesurfer', DATA_SDIR)
        have_freesurfer = isdir(data_path)

freesurfer_test = dec.skipif(
    not have_freesurfer,
    'cannot find freesurfer {0} directory'.format(DATA_SDIR))


def _hash_file_content(fname):
    hasher = hashlib.md5()
    with open(fname, 'rb') as afile:
        buf = afile.read()
        hasher.update(buf)
    return hasher.hexdigest()


@freesurfer_test
def test_geometry():
    """Test IO of .surf"""
    surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "inflated"))
Example 22
from statsmodels.compat.python import PY3
import os
from statsmodels.datasets import get_rdataset, webuse, check_internet
from numpy.testing import assert_, assert_array_equal, dec

cur_dir = os.path.dirname(os.path.abspath(__file__))

@dec.skipif(PY3, 'Not testable on Python 3.x')
def test_get_rdataset():
    # smoke test
    if not PY3:
        #NOTE: there's no way to test both since the cached files were
        #created with Python 2.x, they're strings, but Python 3 expects
        #bytes and the index file path is hard-coded so both can't live
        #side by side
        duncan = get_rdataset("Duncan", "car", cache=cur_dir)
        assert_(duncan.from_cache)

#internet_available = check_internet()
#@dec.skipif(not internet_available)
def t_est_webuse():
    # test copied and adjusted from iolib/tests/test_foreign
    from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2
    #base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
    base_gh = "http://www.statsmodels.org/devel/_static/"
    res1 = webuse('macrodata', baseurl=base_gh, as_df=False)
    assert_array_equal(res1 == res2, True)

#@dec.skipif(not internet_available)
def t_est_webuse_pandas():
    # test copied and adjusted from iolib/tests/test_foreign
Example 23
DATA_SDIR = 'fsaverage'

have_freesurfer = False
if 'SUBJECTS_DIR' in os.environ:
    # May have Freesurfer installed with data
    data_path = pjoin(os.environ["SUBJECTS_DIR"], DATA_SDIR)
    have_freesurfer = isdir(data_path)
else:
    # May have nibabel test data submodule checked out
    nib_data = get_nibabel_data()
    if nib_data != '':
        data_path = pjoin(nib_data, 'nitest-freesurfer', DATA_SDIR)
        have_freesurfer = isdir(data_path)

freesurfer_test = dec.skipif(
    not have_freesurfer,
    'cannot find freesurfer {0} directory'.format(DATA_SDIR))


def _hash_file_content(fname):
    hasher = hashlib.md5()
    with open(fname, 'rb') as afile:
        buf = afile.read()
        hasher.update(buf)
    return hasher.hexdigest()


@freesurfer_test
def test_geometry():
    """Test IO of .surf"""
    surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "inflated"))
Example 24
import re
import glob
import string
import stat
import tempfile

from distutils.dir_util import remove_tree

from numpy.testing import TestCase, assert_, dec, run_module_suite
from numpy.testing.noseclasses import KnownFailureTest

from weave import catalog
from weave_test_utils import clear_temp_catalog, restore_temp_catalog, empty_temp_dir, cleanup_temp_dir


skip_on_windows = dec.skipif(sys.platform == "win32", "Test works only on posix")


def samefile(a, b):
    try:
        return os.path.samefile(a, b)
    except AttributeError:
        return os.path.realpath(a) == os.path.realpath(b)


class TestIntermediateDir(TestCase):
    """
    Tests for the intermediate dir (the store of .cpp and .o files during
    builds). These check whether the intermediate dir is safe to use; if it
    is not, a new one should be created.
    """
Example 25
def mpmath_check(min_ver):
    if mpmath is None:
        return dec.skipif(True, "mpmath is not installed")
    return dec.skipif(
        LooseVersion(mpmath.__version__) < LooseVersion(min_ver),
        "mpmath version >= %s required" % min_ver)
Example 26
from scipy.special import genlaguerre, gamma

from dipy.data import get_gtab_taiwan_dsi
from dipy.reconst.shore import ShoreModel
from dipy.sims.voxel import MultiTensor

from numpy.testing import (assert_almost_equal,
                           assert_equal,
                           run_module_suite,
                           dec)

from dipy.utils.optpkg import optional_package
cvxpy, have_cvxpy, _ = optional_package("cvxpy")

needs_cvxpy = dec.skipif(not have_cvxpy)


# Object to hold module global data
class _C(object):
    pass
data = _C()


def setup():
    data.gtab = get_gtab_taiwan_dsi()
    data.mevals = np.array(([0.0015, 0.0003, 0.0003],
                            [0.0015, 0.0003, 0.0003]))
    data.angl = [(0, 0), (60, 0)]
    data.S, sticks = MultiTensor(
        data.gtab, data.mevals, S0=100.0, angles=data.angl,
Example 27
from __future__ import division, print_function, absolute_import

from distutils.version import LooseVersion
import sys

import numpy as np
from numpy.testing import assert_, assert_allclose, dec
import scipy.special.orthogonal as orth

from scipy.special._testutils import FuncData

# Early Numpy versions have bugs in ufunc keyword argument parsing
numpy_version_requirement = dec.skipif(
    LooseVersion(np.version.version) < LooseVersion('1.6')
    and sys.version_info[0] >= 3,
    "Bug in Numpy < 1.6 on Python 3")

def test_eval_chebyt():
    n = np.arange(0, 10000, 7)
    x = 2*np.random.rand() - 1
    v1 = np.cos(n*np.arccos(x))
    v2 = orth.eval_chebyt(n, x)
    assert_(np.allclose(v1, v2, rtol=1e-15))


def test_warnings():
    # ticket 1334
    olderr = np.seterr(all='raise')
    try:
        # these should raise no fp warnings
        orth.eval_legendre(1, 0)
Example 28
from scipy.sparse import csc_matrix, spdiags, SparseEfficiencyWarning
from scipy.sparse.linalg import linsolve

warnings.simplefilter('ignore',SparseEfficiencyWarning)

import numpy as np
try:
    import scipy.sparse.linalg.dsolve.umfpack as um
except (ImportError, AttributeError):
    _have_umfpack = False
else:
    _have_umfpack = um.umfpack._um is not None

# Allow disabling of nose tests if umfpack not present
# See end of file for application
_umfpack_skip = dec.skipif(not _have_umfpack,
                           'UMFPACK appears not to be compiled')


class _DeprecationAccept:
    def setUp(self):
        self.mgr = WarningManager()
        self.mgr.__enter__()
        warnings.simplefilter("ignore", DeprecationWarning)

    def tearDown(self):
        self.mgr.__exit__()


class TestSolvers(_DeprecationAccept):
    """Tests inverting a sparse linear system"""
Example 29
def mpmath_check(min_ver):
    if mpmath is None:
        return dec.skipif(True, "mpmath is not installed")
    return dec.skipif(LooseVersion(mpmath.__version__) < LooseVersion(min_ver),
                      "mpmath version >= %s required" % min_ver)
Example 30
from __future__ import division, print_function, absolute_import

from numpy.testing import dec

try:
    eval('a @ b')
    has_matmul = True
except:
    has_matmul = False

if has_matmul:
    from jsonctmctree.tests.check_bad_syntax import *

from jsonctmctree.tests.check_good_syntax import *

@dec.skipif(True, 'matmul is unavailable')
def test_matmul_stub():
    pass
Example 31
def platform_skip(func):
    return dec.skipif(skip_complex_tests,
        "Numpy is using complex functions (e.g. sqrt) provided by your "
        "platform's C library. However, they do not seem to behave according "
        "to C99 -- so C99 tests are skipped.")(func)
Example 32
def skip_func(func):
    return dec.skipif(True, msg)(func)
Example 33
import os.path
import numpy as np

from numpy.testing import assert_, assert_equal, \
        dec, decorate_methods, TestCase, run_module_suite

try:
    import PIL.Image
except ImportError:
    _have_PIL = False
else:
    _have_PIL = True
    import scipy.misc.pilutil as pilutil

# Function / method decorator for skipping PIL tests on import failure
_pilskip = dec.skipif(not _have_PIL, 'Need to import PIL for this test')

datapath = os.path.dirname(__file__)

class TestPILUtil(TestCase):
    def test_imresize(self):
        im = np.random.random((10,20))
        for T in np.sctypes['float'] + [float]:
            im1 = pilutil.imresize(im,T(1.1))
            assert_equal(im1.shape,(11,22))

    def test_imresize2(self):
        im = np.random.random((20,30))
        im2 = pilutil.imresize(im, (30,40), interp='bicubic')
        assert_equal(im2.shape, (30,40))
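
The excerpt imports decorate_methods but ends before _pilskip is applied. A plausible ending for the file, using that helper to attach the skip to every TestPILUtil test method:

decorate_methods(TestPILUtil, _pilskip)

if __name__ == "__main__":
    run_module_suite()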
Example 34
def if_datasource(ds, msg):
    try:
        ds.get_filename()
    except DataError:
        return dec.skipif(True, msg)
    return lambda f: f
Example 35
def platform_skip(func):
    return dec.skipif(skip_complex_tests,
        "Numpy is using complex functions (e.g. sqrt) provided by your "
        "platform's C library. However, they do not seem to behave according "
        "to C99 -- so C99 tests are skipped.")(func)
Example 36
"""
import warnings
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
                           assert_raises, assert_array_less, run_module_suite,
                           assert_, assert_equal, dec)
from dipy.testing import assert_greater_equal

from dipy.reconst.ivim import ivim_prediction, IvimModel
from dipy.core.gradients import gradient_table, generate_bvecs
from dipy.sims.voxel import multi_tensor

from dipy.utils.optpkg import optional_package

cvxpy, have_cvxpy, _ = optional_package("cvxpy")
needs_cvxpy = dec.skipif(not have_cvxpy)


def setup_module():
    global gtab, ivim_fit_single, ivim_model_LM, data_single, params_LM, \
        data_multi, ivim_params_LM, D_star, D, f, S0, gtab_with_multiple_b0, \
        noisy_single, mevals, gtab_no_b0, ivim_fit_multi, ivim_model_VP, \
        f_VP, D_star_VP, D_VP, params_VP

    # Let us generate some data for testing.
    bvals = np.array([0., 10., 20., 30., 40., 60., 80., 100.,
                      120., 140., 160., 180., 200., 300., 400.,
                      500., 600., 700., 800., 900., 1000.])
    N = len(bvals)
    bvecs = generate_bvecs(N)
    gtab = gradient_table(bvals, bvecs.T, b0_threshold=0)
Example 37
import numpy as np

from ..py3k import BytesIO

from numpy.testing import assert_array_equal, assert_array_almost_equal, dec

# Decorator to skip tests requiring save / load if scipy not available for mat
# files
try:
    import scipy
except ImportError:
    have_scipy = False
else:
    have_scipy = True
scipy_skip = dec.skipif(not have_scipy, 'scipy not available')

from ..spm99analyze import (Spm99AnalyzeHeader, Spm99AnalyzeImage,
                            HeaderTypeError)

from ..testing import (assert_equal, assert_true, assert_false, assert_raises)

from . import test_analyze
from .test_analyze import _log_chk


class TestSpm99AnalyzeHeader(test_analyze.TestAnalyzeHeader):
    header_class = Spm99AnalyzeHeader

    def test_empty(self):
        super(TestSpm99AnalyzeHeader, self).test_empty()
Example 38
from scipy.special import genlaguerre, gamma

from ...data import get_gtab_taiwan_dsi
from ..shore import ShoreModel
from ...sims.voxel import MultiTensor

from numpy.testing import (assert_almost_equal,
                           assert_equal,
                           run_module_suite,
                           dec)

from ...utils.optpkg import optional_package
cvxopt, have_cvxopt, _ = optional_package("cvxopt")

needs_cvxopt = dec.skipif(not have_cvxopt)


# Object to hold module global data
class _C(object):
    pass
data = _C()


def setup():
    data.gtab = get_gtab_taiwan_dsi()
    data.mevals = np.array(([0.0015, 0.0003, 0.0003],
                            [0.0015, 0.0003, 0.0003]))
    data.angl = [(0, 0), (60, 0)]
    data.S, sticks = MultiTensor(
        data.gtab, data.mevals, S0=100.0, angles=data.angl,