Example #1
    def test_null_dist_prob(self, null):
        """Testing null dist probability"""
        if not isinstance(null, NullDist):
            return
        ds = datasets['uni2small']

        null.fit(OneWayAnova(), ds)

        # check reasonable output.
        # p-values for non-bogus features should be significantly different,
        # while those for bogus (0) features should not
        prob = null.p([20, 0, 0, 0, 0, np.nan])
        # XXX this is labile! it also needs checking since the F-scores
        # of the MCNullDists using normal distribution are apparently not
        # distributed that way, hence the test often (if not always) fails.
        if cfg.getboolean('tests', 'labile', default='yes'):
            self.assertTrue(np.abs(prob[0]) < 0.05,
                            msg="Expected small p, got %g" % prob[0])
        if cfg.getboolean('tests', 'labile', default='yes'):
            self.assertTrue((np.abs(prob[1:]) > 0.05).all(),
                            msg="Bogus features should have insignificant p."
                            " Got %s" % (np.abs(prob[1:]),))

        # has to have matching shape
        if not isinstance(null, FixedNullDist):
            # Fixed dist is univariate ATM so it doesn't care
            # about dimensionality and gives 1 output value
            self.assertRaises(ValueError, null.p, [5, 3, 4])
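
A note on the cfg.getboolean('tests', 'labile', ...) guard above, which recurs throughout these examples: it lets statistically fragile assertions be switched off through configuration. The following is a minimal, self-contained sketch of that pattern (Python 3, standard-library configparser only); it does not use the examples' own cfg object, and the helper name and the fallback keyword are illustrative assumptions.

# Minimal sketch of the "labile" gating pattern, standard library only.
# The real cfg object used in the examples above is NOT involved here.
from configparser import ConfigParser

cfg_sketch = ConfigParser()
cfg_sketch.read_dict({'tests': {'labile': 'yes'}})

def assert_if_labile(condition, msg=''):
    """Enforce a statistically fragile assertion only when enabled in config."""
    if cfg_sketch.getboolean('tests', 'labile', fallback=True):
        assert condition, msg

assert_if_labile(0.01 < 0.05, msg='expected a small p-value')
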
Example #2
    def test_simple_som(self):
        colors = np.array([[0., 0., 0.], [0., 0., 1.], [0., 1., 0.],
                           [1., 0., 0.], [0., 1., 1.], [1., 0., 1.],
                           [1., 1., 0.], [1., 1., 1.]])

        distance_measures = (None, lambda x, y: (x ** 3 + y ** 3) ** (1. / 3))

        for distance_measure in distance_measures:
            # only small SOM for speed reasons
            som = SimpleSOMMapper((10, 5), 200, learning_rate=0.05)

            # no access when nothing is there
            self.assertRaises(RuntimeError, som._access_kohonen)

            som.train(colors)

            fmapped = som.forward(colors)
            self.assertTrue(fmapped.shape == (8, 2))

            # reverse mapping
            rmapped = som.reverse(fmapped)

            if cfg.getboolean('tests', 'labile', default='yes'):
                # should approximately restore the input, but could fail
                # with bad initialisation
                self.assertTrue((np.round(rmapped) == colors).all())
Example #3
def __check_rpy():
    """Check either rpy is available and also set it for the sane execution
    """
    import rpy
    if not cfg.getboolean('rpy', 'interactive', default=True) \
           and (rpy.get_rpy_input() is rpy.rpy_io.rpy_input):
        if __debug__:
            debug('EXT_',
                  "RPy: providing dummy callback for input to return '1'")

        def input1(*args):
            return "1"      # which is "1: abort (with core dump, if enabled)"
        rpy.set_rpy_input(input1)
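
The callback override above keeps non-interactive (batch) runs from blocking on an R prompt. A rough analogue with no rpy dependency is to monkey-patch Python's builtin input() under the same kind of config flag; the sketch below (Python 3) is purely illustrative and assumes nothing about rpy's API beyond what the example itself shows.

# Rough analogue of the non-interactive override above, without rpy:
# when the run is not interactive, every prompt is auto-answered with "1".
import builtins
from configparser import ConfigParser

cfg_sketch = ConfigParser()
cfg_sketch.read_dict({'rpy': {'interactive': 'no'}})

if not cfg_sketch.getboolean('rpy', 'interactive', fallback=True):
    builtins.input = lambda *args: "1"   # dummy callback, never blocks

print(input("choice? "))   # prints "1" without waiting for the user
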
Example #4
def test_glmnet_r():
    # not the perfect dataset with which to test, but
    # it will do for now.
    #data = datasets['dumb2']
    # for some reason the R code fails with the dumb data
    data = datasets['chirp_linear']

    clf = GLMNET_R()

    clf.train(data)

    # prediction has to be almost perfect
    # test with a correlation
    pre = clf.predict(data.samples)
    corerr = corr_error(pre, data.targets)
    if cfg.getboolean('tests', 'labile', default='yes'):
        assert_true(corerr < .2)
Example #5
def teardown_module(module, verbosity=None):
    "tear down test fixtures"
    verbosity = _get_verbosity(verbosity)

    # restore warning handlers
    warning.maxcount = _sys_settings['maxcount']

    if verbosity < 3:
        # restore warning handlers
        warning.handlers = _sys_settings['handlers']

    if verbosity < 4:
        # restore numpy settings
        np.seterr(**_sys_settings['np_errsettings'])

    if cfg.getboolean('tests', 'wtf', default='no'):
        sys.stderr.write(str(wtf()))
Example #6
    def test_enet(self):
        # not the perfect dataset with which to test, but
        # it will do for now.
        #data = datasets['dumb2']
        # for some reason the R code fails with the dumb data
        data = datasets['chirp_linear']

        clf = ENET()

        clf.train(data)

        # prediction has to be almost perfect
        # test with a correlation
        pre = clf.predict(data.samples)
        cor = pearsonr(pre, data.targets)
        if cfg.getboolean('tests', 'labile', default='yes'):
            self.assertTrue(cor[0] > .8)
Example #7
def __check_rpy():
    """Check either rpy is available and also set it for the sane execution
    """
    #import rpy_options
    #rpy_options.set_options(VERBOSE=False, SETUP_READ_CONSOLE=False) # SETUP_WRITE_CONSOLE=False)
    #rpy_options.set_options(VERBOSE=False, SETUP_WRITE_CONSOLE=False) # SETUP_WRITE_CONSOLE=False)
    #    if not cfg.get('rpy', 'read_console', default=False):
    #        print "no read"
    #        rpy_options.set_options(SETUP_READ_CONSOLE=False)
    #    if not cfg.get('rpy', 'write_console', default=False):
    #        print "no write"
    #        rpy_options.set_options(SETUP_WRITE_CONSOLE=False)
    import rpy
    if not cfg.getboolean('rpy', 'interactive', default=True) \
           and (rpy.get_rpy_input() is rpy.rpy_io.rpy_input):
        if __debug__:
            debug('EXT_', "RPy: providing dummy callback for input to return '1'")
        def input1(*args): return "1"      # which is "1: abort (with core dump, if enabled)"
        rpy.set_rpy_input(input1)
Example #8
def check_all_dependencies(force=False, verbosity=1):
    """
    Test for all known dependencies.

    Parameters
    ----------
    force : boolean
      Whether to force the test even if it has already been
      performed.

    """
    # loop over all known dependencies
    for dep in _KNOWN:
        if not exists(dep, force):
            if verbosity:
                warning("%s is not available." % dep)

    if __debug__:
        debug('EXT', 'The following optional externals are present: %s'
                     % [k[5:] for k in cfg.options('externals')
                        if k.startswith('have')
                        and cfg.getboolean('externals', k)])
Example #9
    def test_anova(self):
        """Do some extended testing of OneWayAnova

        in particular -- compound estimation
        """

        m = OneWayAnova()  # default must be not compound ?
        mc = CompoundOneWayAnova()
        ds = datasets['uni2medium']

        # For 2 labels it must be identical for both and equal to
        # simple OneWayAnova
        a, ac = m(ds), mc(ds)

        self.assertTrue(a.shape == (1, ds.nfeatures))
        self.assertTrue(ac.shape == (len(ds.UT), ds.nfeatures))

        assert_array_equal(ac[0], ac[1])
        assert_array_equal(a, ac[1])

        # check for p-value attrs
        if externals.exists('scipy'):
            assert_true('fprob' in a.fa.keys())
            assert_equal(len(ac.fa), len(ac))

        ds = datasets['uni4large']
        ac = mc(ds)
        if cfg.getboolean('tests', 'labile', default='yes'):
            # Each non-bogus feature must score high for its corresponding label
            self.assertTrue(
                (ac.samples[np.arange(4),
                            np.array(ds.a.nonbogus_features)] >= 1).all())
        # All features should have slightly different CompoundAnova
        # values. I really doubt there will ever be a case where this
        # test fails merely due to being 'labile'
        self.assertTrue(np.max(np.std(ac, axis=1)) > 0,
                        msg='In compound anova, we should get different'
                        ' results for different labels. Got %s' % ac)
Example #10
      Dimensionality of target space
    data : array, optional
      Some data (of sufficiently high rank) from which to derive the
      rotation
    """
    if nt is None:
        nt = ns
    # figure out some "random" rotation
    d = max(ns, nt)
    if data is None:
        data = np.random.normal(size=(d*10, d))
    _u, _s, _vh = np.linalg.svd(data[:, :d])
    R = _vh[:ns, :nt]
    if ns == nt:
        # Test if it is indeed a rotation matrix ;)
        # Let's flip the first axis if necessary
        if np.linalg.det(R) < 0:
            R[:, 0] *= -1.0
    return R

datasets = generate_testing_datasets(specs)

if cfg.getboolean('tests', 'use hdf datasets', False):
    if not externals.exists('h5py'):
        raise RuntimeError(
            "Cannot perform HDF5 dump of all datasets in the warehouse, "
            "because 'h5py' is not available")

    datasets = saveload_warehouse()
    print "Replaced all dataset warehouse for HDF5 loaded alternative."
Example #11
                    error_str = ". Caught exception was: " + str(e)
                else:
                    raise
        finally:
            # And restore warnings
            np.seterr(**old_handling)

        if __debug__:
            debug('EXT', "Presence of %s is%s verified%s" %
                  (dep, {True:'', False:' NOT'}[result], error_str))

    if not result:
        if raise_:
            raise exception("Required external '%s' was not found" % dep)
        if issueWarning is not None \
               and cfg.getboolean('externals', 'issue warning', True):
            if issueWarning is True:
                warning("Required external '%s' was not found" % dep)
            else:
                warning(issueWarning)

    # store result in config manager
    if not cfg.has_section('externals'):
        cfg.add_section('externals')
    if result:
        cfg.set('externals', 'have ' + dep, 'yes')
    else:
        cfg.set('externals', 'have ' + dep, 'no')

    return result
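
The tail of this fragment caches the outcome of each external check in the 'externals' config section so later calls can answer from the cache instead of re-probing. Below is a small, self-contained sketch of that cache-in-config idea (Python 3, plain configparser); the function name and keys are illustrative and not the library's actual implementation.

# Sketch: cache "have X" results in a config section so a dependency is
# probed at most once. Illustrative only; the real exists()/cfg are not used.
import importlib
from configparser import ConfigParser

cfg_sketch = ConfigParser()

def have_module(name):
    """Return True if `name` imports, remembering the answer in the config."""
    if not cfg_sketch.has_section('externals'):
        cfg_sketch.add_section('externals')
    key = 'have ' + name
    if cfg_sketch.has_option('externals', key):
        return cfg_sketch.getboolean('externals', key)   # cached answer
    try:
        importlib.import_module(name)
        result = True
    except ImportError:
        result = False
    cfg_sketch.set('externals', key, 'yes' if result else 'no')
    return result

print(have_module('json'))   # probes the import and caches the result
print(have_module('json'))   # answered from the cached config value
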
Example #12
    # store the whole datasets warehouse in one hdf5 file
    hdf = h5py.File(pathjoin(tempdir, 'myhdf5.hdf5'), 'w')
    for d in datasets:
        obj2hdf(hdf, datasets[d], d)
    hdf.close()

    hdf = h5py.File(pathjoin(tempdir, 'myhdf5.hdf5'), 'r')
    rc_ds = {}
    for d in hdf:
        rc_ds[d] = hdf2obj(hdf[d])
    hdf.close()

    #cleanup temp dir
    shutil.rmtree(tempdir, ignore_errors=True)

    # return the reconstructed datasets (for use in datasets warehouse)
    return rc_ds


datasets = generate_testing_datasets(specs)

if cfg.getboolean('tests', 'use hdf datasets', False):
    if not externals.exists('h5py'):
        raise RuntimeError(
            "Cannot perform HDF5 dump of all datasets in the warehouse, "
            "because 'h5py' is not available")

    datasets = saveload_warehouse()
    print "Replaced all dataset warehouse for HDF5 loaded alternative."
Example #13
def collect_nose_tests(verbosity=1):
    """Return list of tests which are pure nose-based
    """
    tests = [
        # Basic data structures/manipulators
        'test_base',
        'test_collections',
        'test_attrmap',

        # Datasets
        'test_datasetng',
        'test_datasetfx',
        'test_dataset_formats',
        'test_splitter',
        'test_generators',
        'test_niftidataset',
        'test_eepdataset',
        'test_erdataset',
        'test_datasrcs',

        # Classifiers
        'test_multiclf',

        # Misc supporting
        'test_neighborhood',
        'test_stats',
        'test_stats_sp',

        # Mappers
        'test_mapper',
        'test_mapper_sp',
        'test_arraymapper',
        'test_boxcarmapper',
        'test_prototypemapper',
        'test_fxmapper',
        'test_zscoremapper',
        'test_waveletmapper',
        'test_mdp',
        'test_filters',
        'test_staticprojection',

        # Learners
        'test_enet',
        'test_spam',
        'test_lars',
        'test_glmnet',
        'test_kernel',
        'test_svmkernels',
        'test_senses',

        # Algorithms
        'test_emp_null',
        'test_clfcrossval',

        # IO
        'test_iohelpers',
        'test_hdf5',
        'test_hdf5_clf',

        # Measures
        'test_transerror',
        'test_datameasure',
        'test_dcov',

        # Misc
        'test_misc',
        'test_errorfx',
        'test_testing',
        'test_usecases',
        'test_surfing',
        'test_surfing_afni',
        'test_surfing_voxelselection'
    ]

    if not cfg.getboolean('tests', 'lowmem', default='no'):
        tests += ['test_atlases']

    return tests
Example #14
def enhanced_doc_string(item, *args, **kwargs):
    """Generate enhanced doc strings for various items.

    Parameters
    ----------
    item : str or class
      What object requires enhancing of documentation
    *args : list
      Includes base classes to look for parameters in as well; the first
      item must be a dictionary of locals if item is given by a string
    force_extend : bool
      Whether to force looking for the documentation in the parents.
      By default force_extend = False, and lookup happens only if kwargs
      is one of the arguments to the respective function (e.g. item.__init__)
    skip_params : list of str
      List of parameters (in addition to [kwargs]) which should not
      be added to the documentation of the class.

    It is to be used from a collector, i.e. whenever the class has already been created
    """
    # Handling of arguments
    if len(kwargs):
        if not set(kwargs.keys()).issubset(set(['force_extend', 'skip_params'])):
            raise ValueError, "Got unknown keyword arguments (something among %s)" \
                  " in enhanced_doc_string." % kwargs
    force_extend = kwargs.get('force_extend', False)
    skip_params = kwargs.get('skip_params', [])

    # XXX make it work also not only with classes but with methods as well
    if isinstance(item, basestring):
        if len(args)<1 or not isinstance(args[0], dict):
            raise ValueError, \
                  "Please provide locals for enhanced_doc_string of %s" % item
        name = item
        lcl = args[0]
        args = args[1:]
    elif hasattr(item, "im_class"):
        # bound method
        raise NotImplementedError, \
              "enhanced_doc_string is not yet implemented for methods"
    elif hasattr(item, "__name__"):
        name = item.__name__
        lcl = item.__dict__
    else:
        raise ValueError, "Don't know how to extend docstring for %s" % item

    # check whether docstring magic is requested or not
    if not cfg.getboolean('doc', 'pimp docstrings', True):
        return  lcl['__doc__']

    if __debug__:
        debug('DOCH', 'Processing docstrings of %s' % name)

    #return lcl['__doc__']
    rst_lvlmarkup = ["=", "-", "_"]

    # would then be called for any child... ok - ad hoc for SVM???
    if hasattr(item, '_customize_doc') and name=='SVM':
        item._customize_doc()

    initdoc = ""
    if lcl.has_key('__init__'):
        func = lcl['__init__']
        initdoc = func.__doc__

        skip_params += lcl.get('__init__doc__exclude__', [])

        # whether to extend arguments
        # do only if kwargs is one of the arguments
        # in python 2.5 args are no longer in co_names but in varnames
        extend_args = force_extend or \
                      'kwargs' in (func.func_code.co_names +
                                   func.func_code.co_varnames)

        if __debug__ and not extend_args:
            debug('DOCH',
                  'Not extending parameters for __init__ of  %s',
                  (name,))

        if initdoc is None:
            initdoc = "Initialize instance of %s" % name

        initdoc, params, suffix = _split_out_parameters(initdoc)
        params_list = _parse_parameters(params)

        known_params = set([i[0] for i in params_list])

        # If there are additional ones:
        if lcl.has_key('_paramsdoc'):
            params_list += [i for i in lcl['_paramsdoc']
                            if not (i[0] in known_params)]
            known_params = set([i[0] for i in params_list])

        # no need for placeholders
        skip_params = set(skip_params + ['kwargs', '**kwargs'])

        # XXX we do evil check here, refactor code to separate
        #     regressions out of the classifiers, and making
        #     retrainable flag not available for those classes which
        #     can't actually do retraining. Although it is not
        #     actually that obvious for Meta Classifiers
        if hasattr(item, '__tags__'):
            clf_internals = item.__tags__
            skip_params.update([i for i in ('retrainable',)
                                if not (i in clf_internals)])

        known_params.update(skip_params)
        if extend_args:
            # go through all the parents and obtain their init parameters
            parent_params_list = []
            for i in args:
                if hasattr(i, '__init__'):
                    # XXX just assign within a class so we don't redo it without need
                    initdoc_ = i.__init__.__doc__
                    if initdoc_ is None:
                        continue
                    splits_ = _split_out_parameters(initdoc_)
                    params_ = splits_[1]
                    parent_params_list += _parse_parameters(params_.lstrip())

            # extend with ones which are not known to current init
            for i, v in parent_params_list:
                if not (i in known_params):
                    params_list += [(i, v)]
                    known_params.update([i])

        # if there are parameters -- populate the list
        if len(params_list):
            params_ = '\n'.join([i[1].rstrip() for i in params_list
                                 if not i[0] in skip_params])
            initdoc += "\n\n%s\n" \
                       % _rst_section('Parameters') + _indent(params_)

        if suffix != "":
            initdoc += "\n\n" + suffix

        initdoc = handle_docstring(initdoc)

        # Finally assign generated doc to the constructor
        lcl['__init__'].__doc__ = initdoc

    docs = [ handle_docstring(lcl['__doc__']) ]

    # Optionally populate the class documentation with it
    if __add_init2doc and initdoc != "":
        docs += [ _rst_underline('Constructor information for `%s` class'
                                 % name, rst_lvlmarkup[2]),
                  initdoc ]

    # Add information about the ca if available
    if lcl.has_key('_cadoc') and len(item._cadoc):
        # to avoid conflicting with a Notes section if one is already
        # present
        lcldoc = lcl['__doc__'] or ''
        if not 'Notes' in lcldoc:
            section_name = _rst_section('Notes')
        else:
            section_name = '\n'         # just an additional newline
        # no indent is necessary since ca list must be already indented
        docs += ['%s\nAvailable conditional attributes:' % section_name,
                 handle_docstring(item._cadoc)]

    # Deprecated -- but actually we might like to have it in ipython
    # mode, maybe?
    if False: #len(args):
        bc_intro = _rst('  ') + 'Please refer to the documentation of the ' \
                   'base %s for more information:' \
                   % (single_or_plural('class', 'classes', len(args)))

        docs += ['\n' + _rst_section('See Also'),
                 bc_intro,
                 '  ' + ',\n  '.join(['%s%s.%s%s%s' % (_rst(':class:`~'),
                                                      i.__module__,
                                                      i.__name__,
                                                     _rst('`'),
                                                      _rst_sep)
                                      for i in args])
                ]

    itemdoc = '\n\n'.join(docs)
    # remove some bogus newlines -- 3 empty lines in a doc are never useful
    result = re.sub("\s*\n\s*\n\s*\n", "\n\n", itemdoc)

    return result
Example #15
        def do_sweep(*args_, **kwargs_):
            """Perform sweeping over provided keyword arguments
            """
            def untrain_clf(argvalue):
                """Little helper"""
                if isinstance(argvalue, Classifier):
                    # clear classifier after its use -- just to be sure ;-)
                    argvalue.params.retrainable = False
                    argvalue.untrain()

            failed_tests = {}
            skipped_tests = []
            report_progress = cfg.get('tests', 'verbosity', default=1) > 1
            for argname in list(kwargs.keys()):
                for argvalue in kwargs[argname]:
                    if isinstance(argvalue, Classifier):
                        # clear classifier before its use
                        argvalue.untrain()
                    if isinstance(argvalue, ClassWithCollections):
                        argvalue.ca.reset()
                    # update kwargs_
                    kwargs_[argname] = argvalue
                    # do actual call
                    try:
                        if __debug__:
                            debug(
                                'TEST', 'Running %s on args=%r and kwargs=%r' %
                                (method.__name__, args_, kwargs_))
                        method(*args_, **kwargs_)
                        status = '+'
                    except SkipTest as e:
                        skipped_tests += [e]
                        status = 'S'
                    except AssertionError as e:
                        status = 'F'
                        estr = str(e)
                        etype, value, tb = sys.exc_info()
                        # literal representation of exception tb, so
                        # we could group them later on
                        eidstr = '  '.join([
                            l for l in tbm.format_exception(etype, value, tb)
                            if not ('do_sweep' in l or 'unittest.py' in l
                                    or 'AssertionError' in l
                                    or 'Traceback (most' in l)
                        ])

                        # Store exception information for grouping later on
                        if not eidstr in failed_tests:
                            failed_tests[eidstr] = []

                        sargvalue = safe_str(argvalue)
                        if not (__debug__ and 'TEST' in debug.active):
                            # by default let's keep it to a sane length
                            if len(sargvalue) > 100:
                                sargvalue = sargvalue[:95] + ' ...'
                        failed_tests[eidstr].append(
                            # skip top-most tb in sweep_args
                            (argname, sargvalue, tb.tb_next, estr))

                        if __debug__:
                            msg = "%s on %s=%s" % (estr, argname,
                                                   safe_str(argvalue))
                            debug('TEST',
                                  'Failed unittest: %s\n%s' % (eidstr, msg))
                    if report_progress:
                        sys.stdout.write(status)
                        sys.stdout.flush()

                    untrain_clf(argvalue)
                    # TODO: handle different levels of unittests properly
                    if cfg.getboolean('tests', 'quick', False):
                        # on TESTQUICK just run test for 1st entry in the list,
                        # the rest are omitted
                        # TODO: proper partitioning of unittests
                        break
            if report_progress:
                sys.stdout.write(' ')
                sys.stdout.flush()
            if len(failed_tests):
                # Lets now create a single AssertionError exception
                # which would nicely incorporate all failed exceptions
                multiple = len(failed_tests) != 1  # is it unique?
                # if so, we don't need to reinclude the traceback since it
                # would be spit out anyway below
                estr = ""
                cestr = "lead to failures of unittest %s" % method.__name__
                if multiple:
                    estr += "\n Different scenarios %s "\
                            "(specific tracebacks are below):" % cestr
                else:
                    estr += "\n Single scenario %s:" % cestr
                for ek, els in failed_tests.items():
                    estr += '\n'
                    if multiple:
                        estr += ek
                    estr += "  on\n    %s" % (
                        "    ".join([
                            "%s=%s%s\n" % (
                                ea,
                                eav,
                                # Why didn't I just do a regular for loop? ;)
                                ":\n     ".join(
                                    [xx for xx in [' ', es] if xx != '']))
                            for ea, eav, etb, es in els
                        ]))
                    # take first one... they all should be identical
                    etb = els[0][2]
                raise AssertionError(estr).with_traceback(etb)
            if len(skipped_tests):
                # so if nothing has failed, let's at least report that some were
                # skipped -- for now just a simple SkipTest message
                raise SkipTest("%d tests were skipped in testing %s" %
                               (len(skipped_tests), method.__name__))
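
For orientation, the sweeping machinery above boils down to: run one test body over several values of an argument, collect the failures, and raise a single aggregated AssertionError at the end. The sketch below (Python 3) is a compact, self-contained illustration of that idea only; it does not reproduce the decorator above, its traceback grouping, or its config-driven quick/progress modes.

# Compact sketch of the sweep-and-aggregate idea; illustrative only.
def sweep(test, argname, argvalues):
    failures = []
    for value in argvalues:
        try:
            test(**{argname: value})
        except AssertionError as e:
            failures.append((value, str(e)))
    if failures:
        raise AssertionError("%d/%d sweep values failed: %s"
                             % (len(failures), len(argvalues), failures))

def check_positive(x):
    assert x > 0, "expected a positive value, got %r" % (x,)

sweep(check_positive, 'x', [1, 2, 3])      # passes silently
# sweep(check_positive, 'x', [1, -1, 0])   # would raise one aggregated error
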
Example #16
# plot the histogram
plot_bars(H.T, xloc=bin_left, width=bin_width, yerr='std')

# show the Gaussians
x = np.linspace(0, 1, 100)
# first gaussian
pl.plot(x, params[0] * norm.pdf(x, params[1], params[2]), "r-", zorder=2)
pl.axvline(params[1], color='r', linestyle='--', alpha=0.6)
# second gaussian
pl.plot(x, params[3] * norm.pdf(x, params[4], params[5]), "b-", zorder=3)
pl.axvline(params[4], color='b', linestyle='--', alpha=0.6)
# dual gaussian
pl.plot(x, dual_gaussian(x, *params), "k--", alpha=0.5, zorder=1)
pl.xlim(0, 1)
pl.ylim(ymin=0)

pl.title('Dual Gaussian fit of searchlight accuracies')

if cfg.getboolean('examples', 'interactive', True):
    # show the cool figures
    pl.show()

"""
And this is how it looks.

.. image:: ../pics/ex_curvefitting_searchlight.*
   :align: center
   :alt: Dual Gaussian fit of searchlight accuracies

"""
Example #17
        def do_sweep(*args_, **kwargs_):
            """Perform sweeping over provided keyword arguments
            """
            def untrain_clf(argvalue):
                """Little helper"""
                if isinstance(argvalue, Classifier):
                    # clear classifier after its use -- just to be sure ;-)
                    argvalue.params.retrainable = False
                    argvalue.untrain()

            failed_tests = {}
            skipped_tests = []
            for argname in kwargs.keys():
                for argvalue in kwargs[argname]:
                    if isinstance(argvalue, Classifier):
                        # clear classifier before its use
                        argvalue.untrain()
                    if isinstance(argvalue, ClassWithCollections):
                        argvalue.ca.reset()
                    # update kwargs_
                    kwargs_[argname] = argvalue
                    # do actual call
                    try:
                        if __debug__:
                            debug('TEST', 'Running %s on args=%r and kwargs=%r'
                                  % (method.__name__, args_, kwargs_))
                        method(*args_, **kwargs_)
                    except SkipTest, e:
                        skipped_tests += [e]
                    except AssertionError, e:
                        estr = str(e)
                        etype, value, tb = sys.exc_info()
                        # literal representation of exception tb, so
                        # we could group them later on
                        eidstr = '  '.join(
                            [l for l in tbm.format_exception(etype, value, tb)
                             if not ('do_sweep' in l
                                     or 'unittest.py' in l
                                     or 'AssertionError' in l
                                     or 'Traceback (most' in l)])

                        # Store exception information for grouping later on
                        if not eidstr in failed_tests:
                            failed_tests[eidstr] = []

                        sargvalue = str(argvalue)
                        if not (__debug__ and 'TEST' in debug.active):
                            # by default let's keep it to a sane length
                            if len(sargvalue) > 100:
                                sargvalue = sargvalue[:95] + ' ...'
                        failed_tests[eidstr].append(
                            # skip top-most tb in sweep_args
                            (argname, sargvalue, tb.tb_next, estr))

                        if __debug__:
                            msg = "%s on %s=%s" % (estr, argname, argvalue)
                            debug('TEST', 'Failed unittest: %s\n%s'
                                  % (eidstr, msg))
                    untrain_clf(argvalue)
                    # TODO: handle different levels of unittests properly
                    if cfg.getboolean('tests', 'quick', False):
                        # on TESTQUICK just run test for 1st entry in the list,
                        # the rest are omitted
                        # TODO: proper partitioning of unittests
                        break
Example #18
def collect_nose_tests(verbosity=1):
    """Return tests which are purely nose-based (now it is actually a mix)
    """

    tests = [
        # Basic data structures/manipulators
        'test_base',
        'test_collections',
        'test_attrmap',
        'test_constraints',

        # Datasets
        'test_bids',
        'test_giftidataset',
        'test_datasetng',
        'test_datasetfx',
        'test_dataset_formats',
        'test_splitter',
        'test_generators',
        'test_niftidataset',
        'test_cosmo',
        'test_eepdataset',
        'test_erdataset',
        'test_datasrcs',

        # Classifiers
        'test_multiclf',
        'test_smlr',

        # Misc supporting
        'test_neighborhood',
        'test_stats',
        'test_stats_sp',

        # Mappers
        'test_mapper',
        'test_mapper_sp',
        'test_arraymapper',
        'test_boxcarmapper',
        'test_prototypemapper',
        'test_fxmapper',
        'test_zscoremapper',
        'test_waveletmapper',
        'test_mdp',
        'test_filters',
        'test_staticprojection',

        # Learners
        'test_compound',
        'test_enet',
        'test_glmmapper',
        'test_lars',
        'test_glmnet',
        'test_kernel',
        'test_svmkernels',
        'test_senses',

        # Algorithms
        'test_emp_null',
        'test_clfcrossval',
        'test_group_clusterthr',

        # IO
        'test_iohelpers',
        'test_hdf5',
        'test_hdf5_clf',
        'test_openfmri',

        # Measures
        'test_transerror',
        'test_datameasure',
        'test_dcov',
        'test_corrstability',
        'test_fxmeasure',
        'test_rsa',

        # Misc
        'test_cmdline_ttest',
        'test_lib_afni',
        'test_misc_scatter',
        'test_misc',
        'test_errorfx',
        'test_testing',
        'test_usecases',
        'test_surfing',
        'test_surfing_afni',
        'test_surfing_voxelselection',
        'test_surfing_surface',
        'test_eeglab',
        'test_progress',
        'test_winner',
        'test_viz',
    ]
    """Return list of tests which are pure nose-based
    """

    if not cfg.getboolean('tests', 'lowmem', default='no'):
        tests += ['test_atlases']

    return tests
Example #19
def collect_nose_tests(verbosity=1):
    """Return list of tests which are pure nose-based
    """
    tests = [
        # Basic data structures/manipulators
        'test_base',
        'test_collections',
        'test_attrmap',

        # Datasets
        'test_datasetng',
        'test_datasetfx',
        'test_dataset_formats',
        'test_splitter',
        'test_generators',
        'test_niftidataset',
        'test_eepdataset',
        'test_erdataset',
        'test_datasrcs',

        # Classifiers
        'test_multiclf',

        # Misc supporting
        'test_neighborhood',
        'test_stats',
        'test_stats_sp',

        # Mappers
        'test_mapper',
        'test_mapper_sp',
        'test_arraymapper',
        'test_boxcarmapper',
        'test_prototypemapper',
        'test_fxmapper',
        'test_zscoremapper',
        'test_waveletmapper',
        'test_mdp',
        'test_filters',
        'test_staticprojection',

        # Learners
        'test_enet',
        'test_spam',
        'test_lars',
        'test_glmnet',
        'test_kernel',
        'test_svmkernels',
        'test_senses',

        # Algorithms
        'test_emp_null',
        'test_clfcrossval',

        # IO
        'test_iohelpers',
        'test_hdf5',
        'test_hdf5_clf',

        # Measures
        'test_transerror',
        'test_datameasure',
        'test_dcov',

        # Misc
        'test_misc',
        'test_errorfx',
        'test_testing',
        'test_usecases',
        'test_surfing',
        'test_surfing_afni',
        'test_surfing_voxelselection',
        'test_eeglab'
        ]

    if not cfg.getboolean('tests', 'lowmem', default='no'):
        tests += ['test_atlases']

    return tests
Example #20
def collect_nose_tests(verbosity=1):
    """Return tests which are purely nose-based (now it is actually a mix)
    """

    tests = [
        # Basic data structures/manipulators
        'test_base',
        'test_collections',
        'test_attrmap',
        'test_constraints',

        # Datasets
        'test_bids',
        'test_giftidataset',
        'test_datasetng',
        'test_datasetfx',
        'test_dataset_formats',
        'test_splitter',
        'test_generators',
        'test_niftidataset',
        'test_cosmo',
        'test_eepdataset',
        'test_erdataset',
        'test_datasrcs',

        # Classifiers
        'test_multiclf',
        'test_smlr',

        # Misc supporting
        'test_neighborhood',
        'test_stats',
        'test_stats_sp',

        # Mappers
        'test_mapper',
        'test_mapper_sp',
        'test_arraymapper',
        'test_boxcarmapper',
        'test_prototypemapper',
        'test_fxmapper',
        'test_zscoremapper',
        'test_waveletmapper',
        'test_mdp',
        'test_filters',
        'test_staticprojection',

        # Learners
        'test_compound',
        'test_enet',
        'test_glmmapper',
        'test_lars',
        'test_glmnet',
        'test_kernel',
        'test_svmkernels',
        'test_senses',

        # Algorithms
        'test_emp_null',
        'test_clfcrossval',
        'test_group_clusterthr',

        # IO
        'test_iohelpers',
        'test_hdf5',
        'test_hdf5_clf',
        'test_openfmri',

        # Measures
        'test_transerror',
        'test_datameasure',
        'test_dcov',
        'test_corrstability',
        'test_fxmeasure',
        'test_rsa',

        # Misc
        'test_cmdline_ttest',
        'test_lib_afni',
        'test_misc_scatter',
        'test_misc_plot',
        'test_misc',
        'test_errorfx',
        'test_testing',
        'test_usecases',
        'test_surfing',
        'test_surfing_afni',
        'test_surfing_voxelselection',
        'test_surfing_surface',
        'test_eeglab',
        'test_progress',
        'test_winner',
        'test_viz',
        ]
    """Return list of tests which are pure nose-based
    """

    if not cfg.getboolean('tests', 'lowmem', default='no'):
        tests += ['test_atlases']

    return tests
Example #21
def exists(dep, force=False, raise_=False, issueWarning=None,
           exception=RuntimeError):
    """
    Test whether a known dependency is installed on the system.

    This method allows us to test for individual dependencies without
    testing all known dependencies. It also ensures that we only test
    for a dependency once.

    Parameters
    ----------
    dep : string or list of string
      The dependency key(s) to test.
    force : boolean
      Whether to force the test even if it has already been
      performed.
    raise_ : boolean, str
      Whether to raise an exception if dependency is missing.
      If True, raising is still conditioned on the global setting
      MVPA_EXTERNALS_RAISE_EXCEPTION, whereas 'always' raises an exception
      if missing regardless of the configuration.
    issueWarning : string or None or True
      If string, warning with given message would be thrown.
      If True, standard message would be used for the warning
      text.
    exception : exception, optional
      What exception to raise.  Defaults to RuntimeError
    """
    # if we are provided with a list of deps - go through all of them
    if isinstance(dep, (list, tuple)):
        results = [ exists(dep_, force, raise_) for dep_ in dep ]
        return bool(reduce(lambda x, y: x and y, results, True))

    # where to look in cfg
    cfgid = 'have ' + dep

    # pre-handle raise_ according to the global settings and local argument
    if isinstance(raise_, str):
        if raise_.lower() == 'always':
            raise_ = True
        else:
            raise ValueError("Unknown value of raise_=%s. "
                             "Must be bool or 'always'" % raise_)
    else: # must be bool conditioned on the global settings
        raise_ = (raise_
                  and cfg.getboolean('externals', 'raise exception', True))

    # prevent unnecessary testing
    if cfg.has_option('externals', cfgid) \
       and not cfg.getboolean('externals', 'retest', default='no') \
       and not force:
        if __debug__:
            debug('EXT', "Skip retesting for '%s'." % dep)

        # check whether an exception should be raised, even though the external
        # was already tested previously
        if not cfg.getboolean('externals', cfgid) and raise_:
            raise exception("Required external '%s' was not found" % dep)
        return cfg.getboolean('externals', cfgid)

    # determine availability of external (non-cached)

    # default to 'not found'
    result = False

    if dep not in _KNOWN:
        raise ValueError("%r is not a known dependency key." % (dep,))
    else:
        # try and load the specific dependency
        if __debug__:
            debug('EXT', "Checking for the presence of %s" % dep)

        # Exceptions which are silently caught while running tests for externals
        _caught_exceptions = [ImportError, AttributeError, RuntimeError]

        try:
            # Suppress NumPy warnings while testing for externals
            old_handling = np.seterr(all="ignore")

            error_str = ''
            try:
                exec _KNOWN[dep]
                result = True
            except tuple(_caught_exceptions), e:
                error_str = ". Caught exception was: " + str(e)
            except Exception, e:
                # Add known ones by their names so we don't need to
                # actually import anything manually to get those classes
                if e.__class__.__name__ in ['RPy_Exception', 'RRuntimeError',
                                            'RPy_RException']:
                    _caught_exceptions += [e.__class__]
                    error_str = ". Caught exception was: " + str(e)
                else:
                    raise
Example #22
        def do_sweep(*args_, **kwargs_):
            """Perform sweeping over provided keyword arguments
            """
            def untrain_clf(argvalue):
                """Little helper"""
                if isinstance(argvalue, Classifier):
                    # clear classifier after its use -- just to be sure ;-)
                    argvalue.params.retrainable = False
                    argvalue.untrain()

            failed_tests = {}
            skipped_tests = []
            report_progress = cfg.get('tests', 'verbosity', default=1) > 1
            for argname in kwargs.keys():
                for argvalue in kwargs[argname]:
                    if isinstance(argvalue, Classifier):
                        # clear classifier before its use
                        argvalue.untrain()
                    if isinstance(argvalue, ClassWithCollections):
                        argvalue.ca.reset()
                    # update kwargs_
                    kwargs_[argname] = argvalue
                    # do actual call
                    try:
                        if __debug__:
                            debug(
                                'TEST', 'Running %s on args=%r and kwargs=%r' %
                                (method.__name__, args_, kwargs_))
                        method(*args_, **kwargs_)
                        status = '+'
                    except SkipTest, e:
                        skipped_tests += [e]
                        status = 'S'
                    except AssertionError, e:
                        status = 'F'
                        estr = str(e)
                        etype, value, tb = sys.exc_info()
                        # literal representation of exception tb, so
                        # we could group them later on
                        eidstr = '  '.join([
                            l for l in tbm.format_exception(etype, value, tb)
                            if not ('do_sweep' in l or 'unittest.py' in l
                                    or 'AssertionError' in l
                                    or 'Traceback (most' in l)
                        ])

                        # Store exception information for grouping later on
                        if not eidstr in failed_tests:
                            failed_tests[eidstr] = []

                        sargvalue = str(argvalue)
                        if not (__debug__ and 'TEST' in debug.active):
                            # by default let's keep it to a sane length
                            if len(sargvalue) > 100:
                                sargvalue = sargvalue[:95] + ' ...'
                        failed_tests[eidstr].append(
                            # skip top-most tb in sweep_args
                            (argname, sargvalue, tb.tb_next, estr))

                        if __debug__:
                            msg = "%s on %s=%s" % (estr, argname, argvalue)
                            debug('TEST',
                                  'Failed unittest: %s\n%s' % (eidstr, msg))
                    if report_progress:
                        sys.stdout.write(status)
                        sys.stdout.flush()

                    untrain_clf(argvalue)
                    # TODO: handle different levels of unittests properly
                    if cfg.getboolean('tests', 'quick', False):
                        # on TESTQUICK just run test for 1st entry in the list,
                        # the rest are omitted
                        # TODO: proper partitioning of unittests
                        break