def test_repr():
    attr_repr = "SampleAttribute(name='TestAttr', doc='my own test', " \
                "value=array([0, 1, 2, 3, 4]), length=None)"
    sattr = SampleAttribute(name='TestAttr', doc='my own test',
                            value=np.arange(5))

    # check precise formal representation
    ok_(repr(sattr) == attr_repr)

    # check that it actually works as a Python expression
    from numpy import array
    eattr = eval(repr(sattr))
    ok_(repr(eattr) == attr_repr)

    # should also work for a simple dataset
    # Does not work due to bug in numpy:
    # python -c "from numpy import *; print __version__; r=repr(array(['s', None])); print r; eval(r)"
    # would give "array([s, None], dtype=object)" without '' around s
    #ds = datasets['uni2small']
    ds = Dataset([[0, 1]],
                 a={'dsa1': 'v1'},
                 sa={'targets': [0]},
                 fa={'targets': ['b', 'n']})
    ds_repr = repr(ds)
    cfg_repr = cfg.get('datasets', 'repr', 'full')
    if cfg_repr == 'full':
        ok_(repr(eval(ds_repr)) == ds_repr)
    elif cfg_repr == 'str':
        ok_(str(ds) == ds_repr)
    else:
        raise AssertionError('Unknown kind of datasets.repr configuration %r'
                             % cfg_repr)
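
# Hedged round-trip sketch (illustration only, not part of the original test
# module; assumes the same module-level imports as the test above: np, Dataset,
# ok_).  With the default 'datasets.repr = full' configuration, repr() of a
# dataset is a valid Python expression that rebuilds an equivalent dataset.
def test_repr_roundtrip_sketch():
    from numpy import array      # repr() embeds array(...) literals
    ds = Dataset([[0, 1]],
                 a={'dsa1': 'v1'},
                 sa={'targets': [0]},
                 fa={'targets': ['b', 'n']})
    ds2 = eval(repr(ds))
    # an equivalent dataset must render to the identical representation
    ok_(repr(ds2) == repr(ds))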
from mvpa.clfs.blr import BLR
clfswh += RegressionAsClassifier(BLR(descr="BLR()"),
                                 descr="BLR Classifier")

#PLR
from mvpa.clfs.plr import PLR
clfswh += PLR(descr="PLR()")
if externals.exists('scipy'):
    clfswh += PLR(reduced=0.05, descr="PLR(reduced=0.05)")

# SVM stuff
if len(clfswh['linear', 'svm']) > 0:
    linearSVMC = clfswh['linear', 'svm',
                        cfg.get('svm', 'backend', default='libsvm').lower()
                        ][0]

    # "Interesting" classifiers
    clfswh += \
        FeatureSelectionClassifier(
            linearSVMC.clone(),
            SensitivityBasedFeatureSelection(
                SMLRWeights(SMLR(lm=0.1, implementation="C"),
                            postproc=maxofabs_sample()),
                RangeElementSelector(mode='select')),
            descr="LinSVM on SMLR(lm=0.1) non-0")

    clfswh += \
        FeatureSelectionClassifier(
__docformat__ = 'restructuredtext'

import numpy as np
import copy

from mvpa.base import externals, cfg
from mvpa.base.collections import SampleAttributesCollection, \
        FeatureAttributesCollection, DatasetAttributesCollection
from mvpa.base.types import is_datasetlike
from mvpa.base.dochelpers import _str

if __debug__:
    from mvpa.base import debug

__REPR_STYLE__ = cfg.get('datasets', 'repr', 'full')

if not __REPR_STYLE__ in ('full', 'str'):
    raise ValueError, "Incorrect value %r for option datasets.repr." \
          " Valid are 'full' and 'str'." % __REPR_STYLE__


class AttrDataset(object):
    """Generic storage class for datasets with multiple attributes.

    A dataset consists of four pieces.  The core is a two-dimensional
    array that has variables (so-called `features`) in its columns and
    the associated observations (so-called `samples`) in the rows.  In
    addition a dataset may have any number of attributes for features
    and samples.  Unsurprisingly, these are called 'feature attributes'
    and 'sample attributes'.  Each attribute is a vector of any datatype
    that contains a value per each item (feature or sample).  Both types
# take care of conditional import of external classifiers
from mvpa.base import warning, cfg, externals
from _svmbase import _SVM

if __debug__:
    from mvpa.base import debug

# SVM implementation to be used "by default"
SVM = None
_NuSVM = None

# TODO: handle choices within cfg
_VALID_BACKENDS = ('libsvm', 'shogun', 'sg')

_svm_backend = cfg.get('svm', 'backend', default='libsvm').lower()
if _svm_backend == 'shogun':
    _svm_backend = 'sg'

if not _svm_backend in _VALID_BACKENDS:
    raise ValueError, 'Configuration option svm.backend got invalid value %s.' \
          ' Valid choices are %s' % (_svm_backend, _VALID_BACKENDS)

if __debug__:
    debug('SVM', 'SVM backend is %s' % _svm_backend)

if externals.exists('shogun'):
    from mvpa.clfs import sg
    SVM = sg.SVM
    # Somewhat cruel hack -- define "SVM" family of kernels as binds
    # to specific default SVM implementation
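
# Hedged configuration sketch (assumption, not from this module): the backend
# probed via cfg.get('svm', 'backend') above would typically be set in the
# PyMVPA configuration file read by cfg (e.g. ~/.pymvpa.cfg):
#
#   [svm]
#   backend = shogun
#
# Any value outside _VALID_BACKENDS ('libsvm', 'shogun', 'sg') triggers the
# ValueError raised above.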
__docformat__ = 'restructuredtext'

# canonical PyMVPA version string
__version__ = '0.5.0.dev'

import os
import random
import numpy as np

from mvpa.base import cfg
from mvpa.base import externals
from mvpa.base.info import wtf

# locate data root -- data might not be installed, but if it is, it should be at
# this location
pymvpa_dataroot = \
    cfg.get('data', 'root',
            default=os.path.join(os.path.dirname(__file__), 'data'))

# locate PyMVPA data database root -- also might not be installed, but if it is,
# it should be at this location
pymvpa_datadbroot = \
    cfg.get('datadb', 'root',
            default=os.path.join(os.curdir, 'datadb'))

if not __debug__:
    try:
        import psyco
        psyco.profile()
    except ImportError:
        from mvpa.base import verbose
        verbose(2, "Psyco online compilation is not enabled")
else:
    # Controllable seeding of random number generator
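
# Hedged configuration sketch (assumption, not from this module): both root
# locations above fall back to their defaults only when the corresponding
# options are absent from the PyMVPA configuration (e.g. ~/.pymvpa.cfg):
#
#   [data]
#   root = /usr/share/pymvpa/data
#
#   [datadb]
#   root = /home/me/pymvpa-datadb
#
# (both paths are hypothetical examples)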