Example #1
def _set_matplotlib_backend():
    """Check if we have custom backend to set and it is different
    from current one
    """
    backend = cfg.get('matplotlib', 'backend')
    if backend:
        import matplotlib as mpl
        mpl_backend = mpl.get_backend().lower()
        if mpl_backend != backend.lower():
            if __debug__:
                debug('EXT_', "Trying to set matplotlib backend to %s" % backend)
            mpl.use(backend)
            import warnings
            # and silence matplotlib's "no effect" warning for any future use() calls
            warnings.filterwarnings(
                'ignore', 'This call to matplotlib.use() has no effect.*',
                UserWarning)
        elif __debug__:
            debug('EXT_',
                  "Not trying to set matplotlib backend to %s since it was "
                  "already set" % backend)
Example #2
def _get_verbosity(verbosity):
    if verbosity is None:
        return int(cfg.get('tests', 'verbosity', default=1))
    return verbosity
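
A quick sketch of the fallback behavior (values are illustrative; assumes the
tests/verbosity config option is unset so the default of 1 applies):

v = _get_verbosity(None)  # -> 1, falls back to the cfg default
v = _get_verbosity(3)     # -> 3, an explicit argument wins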
Example #3
File: rsa_fmri.py Project: hanke/PyMVPA
import numpy as np
import pylab as pl
from os.path import join as pjoin
from mvpa2 import cfg

"""
In this example we use a dataset from :ref:`Haxby et al. (2001) <HGF+01>` where
participants watched pictures of eight different visual objects while fMRI was
recorded. The following snippet loads a portion of this dataset (single
subject) from regions on the ventral and occipital surface of the brain.
"""

# load dataset -- ventral and occipital ROIs
from mvpa2.datasets.sources.native import load_tutorial_data
datapath = pjoin(cfg.get('location', 'tutorial data'), 'haxby2001')
ds = load_tutorial_data(roi=(15, 16, 23, 24, 36, 38, 39, 40, 48))

"""
We only do minimal pre-processing: linear trend removal and Z-scoring all voxel
time-series with respect to the mean and standard deviation of the "rest"
condition.
"""

# only minimal detrending
from mvpa2.mappers.detrend import poly_detrend
poly_detrend(ds, polyord=1, chunks_attr='chunks')
# z-scoring with respect to the 'rest' condition
from mvpa2.mappers.zscore import zscore
zscore(ds, chunks_attr='chunks', param_est=('targets', 'rest'))
# now remove 'rest' samples
ds = ds[ds.sa.targets != 'rest']
Example #4
        def do_sweep(*args_, **kwargs_):
            """Perform sweeping over provided keyword arguments
            """
            def untrain_clf(argvalue):
                """Little helper"""
                if isinstance(argvalue, Classifier):
                    # clear classifier after its use -- just to be sure ;-)
                    argvalue.params.retrainable = False
                    argvalue.untrain()

            failed_tests = {}
            skipped_tests = []
            report_progress = cfg.get('tests', 'verbosity', default=1) > 1
            for argname in kwargs.keys():
                for argvalue in kwargs[argname]:
                    if isinstance(argvalue, Classifier):
                        # clear classifier before its use
                        argvalue.untrain()
                    if isinstance(argvalue, ClassWithCollections):
                        argvalue.ca.reset()
                    # update kwargs_
                    kwargs_[argname] = argvalue
                    # do actual call
                    try:
                        if __debug__:
                            debug(
                                'TEST', 'Running %s on args=%r and kwargs=%r' %
                                (method.__name__, args_, kwargs_))
                        method(*args_, **kwargs_)
                        status = '+'
                    except SkipTest as e:
                        skipped_tests += [e]
                        status = 'S'
                    except AssertionError as e:
                        status = 'F'
                        estr = str(e)
                        etype, value, tb = sys.exc_info()
                        # literal representation of exception tb, so
                        # we could group them later on
                        eidstr = '  '.join([
                            l for l in tbm.format_exception(etype, value, tb)
                            if not ('do_sweep' in l or 'unittest.py' in l
                                    or 'AssertionError' in l
                                    or 'Traceback (most' in l)
                        ])

                        # Store exception information for later grouping
                        if eidstr not in failed_tests:
                            failed_tests[eidstr] = []

                        sargvalue = str(argvalue)
                        if not (__debug__ and 'TEST' in debug.active):
                            # by default let's keep it to a sane length
                            if len(sargvalue) > 100:
                                sargvalue = sargvalue[:95] + ' ...'
                        failed_tests[eidstr].append(
                            # skip top-most tb in sweep_args
                            (argname, sargvalue, tb.tb_next, estr))

                        if __debug__:
                            msg = "%s on %s=%s" % (estr, argname, argvalue)
                            debug('TEST',
                                  'Failed unittest: %s\n%s' % (eidstr, msg))
                    if report_progress:
                        sys.stdout.write(status)
                        sys.stdout.flush()

                    untrain_clf(argvalue)
                    # TODO: handle different levels of unittests properly
                    if cfg.getboolean('tests', 'quick', False):
                        # on TESTQUICK just run test for 1st entry in the list,
                        # the rest are omitted
                        # TODO: proper partitioning of unittests
                        break
Example #5
        def do_sweep(*args_, **kwargs_):
            """Perform sweeping over provided keyword arguments
            """
            def untrain_clf(argvalue):
                """Little helper"""
                if isinstance(argvalue, Classifier):
                    # clear classifier after its use -- just to be sure ;-)
                    argvalue.params.retrainable = False
                    argvalue.untrain()

            failed_tests = {}
            skipped_tests = []
            report_progress = cfg.get('tests', 'verbosity', default=1) > 1
            for argname in list(kwargs.keys()):
                for argvalue in kwargs[argname]:
                    if isinstance(argvalue, Classifier):
                        # clear classifier before its use
                        argvalue.untrain()
                    if isinstance(argvalue, ClassWithCollections):
                        argvalue.ca.reset()
                    # update kwargs_
                    kwargs_[argname] = argvalue
                    # do actual call
                    try:
                        if __debug__:
                            debug(
                                'TEST', 'Running %s on args=%r and kwargs=%r' %
                                (method.__name__, args_, kwargs_))
                        method(*args_, **kwargs_)
                        status = '+'
                    except SkipTest as e:
                        skipped_tests += [e]
                        status = 'S'
                    except AssertionError as e:
                        status = 'F'
                        estr = str(e)
                        etype, value, tb = sys.exc_info()
                        # literal representation of exception tb, so
                        # we could group them later on
                        eidstr = '  '.join([
                            l for l in tbm.format_exception(etype, value, tb)
                            if not ('do_sweep' in l or 'unittest.py' in l
                                    or 'AssertionError' in l
                                    or 'Traceback (most' in l)
                        ])

                        # Store exception information for later grouping
                        if eidstr not in failed_tests:
                            failed_tests[eidstr] = []

                        sargvalue = safe_str(argvalue)
                        if not (__debug__ and 'TEST' in debug.active):
                            # by default let's keep it to a sane length
                            if len(sargvalue) > 100:
                                sargvalue = sargvalue[:95] + ' ...'
                        failed_tests[eidstr].append(
                            # skip top-most tb in sweep_args
                            (argname, sargvalue, tb.tb_next, estr))

                        if __debug__:
                            msg = "%s on %s=%s" % (estr, argname,
                                                   safe_str(argvalue))
                            debug('TEST',
                                  'Failed unittest: %s\n%s' % (eidstr, msg))
                    if report_progress:
                        sys.stdout.write(status)
                        sys.stdout.flush()

                    untrain_clf(argvalue)
                    # TODO: handle different levels of unittests properly
                    if cfg.getboolean('tests', 'quick', False):
                        # on TESTQUICK just run test for 1st entry in the list,
                        # the rest are omitted
                        # TODO: proper partitioning of unittests
                        break
            if report_progress:
                sys.stdout.write(' ')
                sys.stdout.flush()
            if len(failed_tests):
                # Let's now create a single AssertionError exception
                # which would nicely incorporate all failed exceptions
                multiple = len(failed_tests) != 1  # is the failure unique?
                # if it is, we don't need to re-include the traceback since it
                # would be spit out anyway below
                estr = ""
                cestr = "lead to failures of unittest %s" % method.__name__
                if multiple:
                    estr += "\n Different scenarios %s "\
                            "(specific tracebacks are below):" % cestr
                else:
                    estr += "\n Single scenario %s:" % cestr
                for ek, els in failed_tests.items():
                    estr += '\n'
                    if multiple:
                        estr += ek
                    estr += "  on\n    %s" % (
                        "    ".join([
                            "%s=%s%s\n" % (
                                ea,
                                eav,
                                # Why didn't I just do regular for loop? ;)
                                ":\n     ".join(
                                    [xx for xx in [' ', es] if xx != '']))
                            for ea, eav, etb, es in els
                        ]))
                    # take first one... they all should be identical
                    etb = els[0][2]
                raise AssertionError(estr).with_traceback(etb)
            if len(skipped_tests):
                # so if nothing has failed, let's at least report that some
                # were skipped -- for now just a simple SkipTest message
                raise SkipTest("%d tests were skipped in testing %s" %
                               (len(skipped_tests), method.__name__))
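
The do_sweep closure above is the heart of PyMVPA's sweepargs test decorator.
A hedged usage sketch (the import path is an assumption; the test body is
illustrative, not taken from the snippet):

# sweepargs calls the decorated test once per value of each keyword
# argument, printing '+', 'S' or 'F' per run and aggregating failures
# into a single AssertionError as implemented above.
from mvpa2.testing.sweepargs import sweepargs  # assumed import path

@sweepargs(value=[0, 1, 2])
def test_nonnegative(value):
    assert value >= 0

test_nonnegative()  # runs three times, once per swept value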
Example #6
import numpy as np
import pylab as pl
import os
from os.path import join as pjoin
from mvpa2 import cfg

"""
In this example we use a dataset from Haxby et al. (2001) where participants watched pictures of eight different visual objects while fMRI was recorded. The following snippet loads a portion of this dataset (single subject) from regions on the ventral and occipital surface of the brain.
"""

# load dataset -- ventral and occipital ROIs
from mvpa2.datasets.sources.native import load_tutorial_data
#'/home/lab/Desktop/PyMVPA-master/mvpa2/data/'
#datapath = '/usr/lib/python2.7/dist-packages/mvpa2/data/haxby2001'
datapath = pjoin(cfg.get('location', 'tutorial data'), 'haxby2001')
ds = load_tutorial_data(path='/usr/lib/python2.7/dist-packages/mvpa2/data', roi=(15, 16, 23, 24, 36, 38, 39, 40, 48))

"""
We only do minimal pre-processing: linear trend removal and Z-scoring all voxel time-series with respect to the mean and standard deviation of the “rest” condition.
"""

# only minimal detrending
from mvpa2.mappers.detrend import poly_detrend
poly_detrend(ds, polyord=1, chunks_attr='chunks')
# z-scoring with respect to the 'rest' condition
from mvpa2.mappers.zscore import zscore
zscore(ds, chunks_attr='chunks', param_est=('targets', 'rest'))
# now remove 'rest' samples
ds = ds[ds.sa.targets != 'rest']
Example #7
File: sweep.py Project: Arthurkorn/PyMVPA
        def do_sweep(*args_, **kwargs_):
            """Perform sweeping over provided keyword arguments
            """
            def untrain_clf(argvalue):
                """Little helper"""
                if isinstance(argvalue, Classifier):
                    # clear classifier after its use -- just to be sure ;-)
                    argvalue.params.retrainable = False
                    argvalue.untrain()

            failed_tests = {}
            skipped_tests = []
            report_progress = cfg.get('tests', 'verbosity', default=1) > 1
            for argname in kwargs.keys():
                for argvalue in kwargs[argname]:
                    if isinstance(argvalue, Classifier):
                        # clear classifier before its use
                        argvalue.untrain()
                    if isinstance(argvalue, ClassWithCollections):
                        argvalue.ca.reset()
                    # update kwargs_
                    kwargs_[argname] = argvalue
                    # do actual call
                    try:
                        if __debug__:
                            debug('TEST', 'Running %s on args=%r and kwargs=%r'
                                  % (method.__name__, args_, kwargs_))
                        method(*args_, **kwargs_)
                        status = '+'
                    except SkipTest as e:
                        skipped_tests += [e]
                        status = 'S'
                    except AssertionError as e:
                        status = 'F'
                        estr = str(e)
                        etype, value, tb = sys.exc_info()
                        # literal representation of exception tb, so
                        # we could group them later on
                        eidstr = '  '.join(
                            [l for l in tbm.format_exception(etype, value, tb)
                             if not ('do_sweep' in l
                                     or 'unittest.py' in l
                                     or 'AssertionError' in l
                                     or 'Traceback (most' in l)])

                        # Store exception information for later grouping
                        if eidstr not in failed_tests:
                            failed_tests[eidstr] = []

                        sargvalue = str(argvalue)
                        if not (__debug__ and 'TEST' in debug.active):
                            # by default let's keep it to a sane length
                            if len(sargvalue) > 100:
                                sargvalue = sargvalue[:95] + ' ...'
                        failed_tests[eidstr].append(
                            # skip top-most tb in sweep_args
                            (argname, sargvalue, tb.tb_next, estr))

                        if __debug__:
                            msg = "%s on %s=%s" % (estr, argname, argvalue)
                            debug('TEST', 'Failed unittest: %s\n%s'
                                  % (eidstr, msg))
                    if report_progress:
                        sys.stdout.write(status)
                        sys.stdout.flush()

                    untrain_clf(argvalue)
                    # TODO: handle different levels of unittests properly
                    if cfg.getboolean('tests', 'quick', False):
                        # on TESTQUICK just run test for 1st entry in the list,
                        # the rest are omitted
                        # TODO: proper partitioning of unittests
                        break
Example #8
def run(limit=None, verbosity=None, exit_=False):
    """Runs the full or a subset of the PyMVPA unittest suite.

    Parameters
    ----------
    limit : None or list
      If None, the full test suite is run. Alternatively, a list with test IDs
      can be provided. IDs are the base filenames of the test implementation,
      e.g. the ID for the suite in 'mvpa2/tests/test_niftidataset.py' is
      'niftidataset'.
    verbosity : None or int
      Verbosity of unittest execution. If None, it is controlled by the PyMVPA
      configuration option tests/verbosity. Values >=3 enable all Python and
      PyMVPA warnings, >=4 adds NumPy warnings, >=5 adds nose debug info.
    exit_ : bool, optional
      Whether to exit with an error code upon completion.
    """
    if __debug__:
        from mvpa2.base import debug
        # Let's add some targets which provide additional testing
        debug.active += ['CHECK_.*']

    if verbosity is None:
        verbosity = int(cfg.get('tests', 'verbosity', default=1))

    # provide people with a hint about the warnings that might show up in a
    # second
    if verbosity:
        print("T: MVPA_SEED=%s" % _random_seed)
        if verbosity > 1:
            print('T: Testing for availability of external software packages.')

    # So we could see all warnings about missing dependencies
    maxcount = warning.maxcount
    warning.maxcount = 1000

    # full test of externals
    externals.test_all_dependencies(verbosity=max(0, verbosity-1))

    if verbosity < 3:
        # no MVPA warnings during the whole test suite (but restore handlers later)
        handler_backup = warning.handlers
        warning.handlers = []

        # No python warnings (like ctypes version for slmr)
        import warnings
        warnings.simplefilter('ignore')

    if verbosity < 4:
        # No NumPy
        np_errsettings = np.geterr()
        np.seterr(**dict([(x, 'ignore') for x in np_errsettings]))

    try:
        if externals.exists('nose'):
            # Let's just use nose
            run_tests_using_nose(limit=limit,
                                 verbosity=verbosity,
                                 exit_=exit_)
        else:
            print("T: Warning -- major bulk of tests is skipped since nose "
                  "is unavailable")
            # collect all tests
            suites = collect_test_suites(verbosity=verbosity)

            if limit is None:
                # make global test suite (use them all)
                ts = unittest.TestSuite(suites.values())
            else:
                ts = unittest.TestSuite([suites[s] for s in limit])


            class TextTestRunnerPyMVPA(unittest.TextTestRunner):
                """Extend TextTestRunner to print out random seed which was
                used in the case of failure"""
                def run(self, test):
                    """Run the bloody test and puke the seed value if failed"""
                    result = super(TextTestRunnerPyMVPA, self).run(test)
                    if not result.wasSuccessful():
                        print("MVPA_SEED=%s" % _random_seed)
                    return result

            # finally run it
            TextTestRunnerPyMVPA(verbosity=verbosity).run(ts)
    finally:
        # restore warning handlers
        warning.maxcount = maxcount

    if verbosity < 3:
        # restore warning handlers
        warning.handlers = handler_backup

    if verbosity < 4:
        # restore numpy settings
        np.seterr(**np_errsettings)
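
A hedged usage sketch of run() (the import location is an assumption; the test
ID follows the base-filename rule from the docstring):

# Run only the suite in mvpa2/tests/test_niftidataset.py at higher
# verbosity, without exiting with an error code on completion.
from mvpa2.tests import run  # assumed import location
run(limit=['niftidataset'], verbosity=2, exit_=False)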