Code example #1
File: test_datasetng.py Project: psederberg/PyMVPA
def test_repr():
    attr_repr = "SampleAttribute(name='TestAttr', doc='my own test', " \
                                "value=array([0, 1, 2, 3, 4]), length=None)"
    sattr = SampleAttribute(name='TestAttr', doc='my own test',
                            value=np.arange(5))
    # check precise formal representation
    ok_(repr(sattr) == attr_repr)
    # check that it actually works as a Python expression
    from numpy import array
    eattr = eval(repr(sattr))
    ok_(repr(eattr), attr_repr)

    # should also work for a simple dataset
    # Does not work due to bug in numpy:
    #  python -c "from numpy import *; print __version__; r=repr(array(['s', None])); print r; eval(r)"
    # would give "array([s, None], dtype=object)" without '' around s
    #ds = datasets['uni2small']
    ds = Dataset([[0, 1]],
                 a={'dsa1': 'v1'},
                 sa={'targets': [0]},
                 fa={'targets': ['b', 'n']})
    ds_repr = repr(ds)
    cfg_repr = cfg.get('datasets', 'repr', 'full')
    if cfg_repr == 'full':
        try:
            ok_(repr(eval(ds_repr)) == ds_repr)
        except SyntaxError, e:
            raise AssertionError, "%r cannot be evaluated" % ds_repr
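The round-trip this test relies on can be reproduced with numpy alone. A minimal sketch (plain numpy, not PyMVPA code) of why the test imports array from numpy before calling eval() on the repr, and of the object-array corner case the comment above refers to:

import numpy as np
from numpy import array  # eval() needs 'array' in scope to rebuild the repr

a = np.arange(5)
r = repr(a)                      # "array([0, 1, 2, 3, 4])"
assert (eval(r) == a).all()      # the formal repr evaluates back to an equal array

# The corner case from the comment: with old numpy versions the repr of an
# object array could omit the quotes around 's', so eval(repr(...)) failed.
b = np.array(['s', None], dtype=object)
print(repr(b))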
Code example #2
File: test_datasetng.py Project: kirtyvedula/PyMVPA
def test_repr():
    attr_repr = "SampleAttribute(name='TestAttr', doc='my own test', " \
                                "value=array([0, 1, 2, 3, 4]), length=None)"
    sattr = SampleAttribute(name='TestAttr',
                            doc='my own test',
                            value=np.arange(5))
    # check precise formal representation
    ok_(repr(sattr) == attr_repr)
    # check that it actually works as a Python expression
    from numpy import array
    eattr = eval(repr(sattr))
    ok_(repr(eattr), attr_repr)

    # should also work for a simple dataset
    # Does not work due to bug in numpy:
    #  python -c "from numpy import *; print __version__; r=repr(array(['s', None])); print r; eval(r)"
    # would give "array([s, None], dtype=object)" without '' around s
    #ds = datasets['uni2small']
    ds = Dataset([[0, 1]],
                 a={'dsa1': 'v1'},
                 sa={'targets': [0]},
                 fa={'targets': ['b', 'n']})
    ds_repr = repr(ds)
    cfg_repr = cfg.get('datasets', 'repr', 'full')
    if cfg_repr == 'full':
        try:
            ok_(repr(eval(ds_repr)) == ds_repr)
        except SyntaxError, e:
            raise AssertionError, "%r cannot be evaluated" % ds_repr
Code example #3
File: dataset.py Project: JohnGriffiths/nidata
__docformat__ = 'restructuredtext'

import numpy as np
import copy

from mvpa2.base import externals, cfg, warning
from mvpa2.base.collections import SampleAttributesCollection, \
        FeatureAttributesCollection, DatasetAttributesCollection
from mvpa2.base.types import is_datasetlike
from mvpa2.base.dochelpers import _str, _strid

if __debug__:
    from mvpa2.base import debug

__REPR_STYLE__ = cfg.get('datasets', 'repr', 'full')

if not __REPR_STYLE__ in ('full', 'str'):
    raise ValueError, "Incorrect value %r for option datasets.repr." \
          " Valid are 'full' and 'str'." % __REPR_STYLE__

class AttrDataset(object):
    """Generic storage class for datasets with multiple attributes.

    A dataset consists of four pieces.  The core is a two-dimensional
    array that has variables (so-called `features`) in its columns and
    the associated observations (so-called `samples`) in the rows.  In
    addition a dataset may have any number of attributes for features
    and samples.  Unsurprisingly, these are called 'feature attributes'
    and 'sample attributes'.  Each attribute is a vector of any datatype
    that contains a value per each item (feature or sample). Both types
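The "four pieces" described in this docstring (the samples array plus sample, feature, and dataset attributes) map directly onto the Dataset constructor used in the test above. A minimal sketch, assuming the usual mvpa2.datasets.base import path:

import numpy as np
from mvpa2.datasets.base import Dataset

# two samples (rows) by three features (columns)
ds = Dataset(np.array([[1, 2, 3],
                       [4, 5, 6]]),
             sa={'targets': ['rest', 'task']},   # one value per sample
             fa={'voxel_id': [10, 11, 12]},      # one value per feature
             a={'note': 'toy example'})          # dataset-level attribute
print(ds.shape)        # (2, 3)
print(ds.sa.targets)   # ['rest' 'task']
print(ds.fa.voxel_id)  # [10 11 12]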
Code example #4
File: __init__.py Project: Anhmike/PyMVPA
# commit hash to be filled in by Git upon export/archive
hashfilename = pathjoin(os.path.dirname(__file__), 'COMMIT_HASH')
__hash__ = ''
if os.path.exists(hashfilename):
    hashfile = open(hashfilename, 'r')
    __hash__ = hashfile.read().strip()
    hashfile.close()

#
# Data paths
#

# locate data root -- data might not be installed, but if it is, it should be at
# this location
pymvpa_dataroot = \
        cfg.get('data', 'root',
                default=pathjoin(os.path.dirname(__file__), 'data'))
# locate PyMVPA data database root -- also might not be installed, but if it is,
# it should be at this location
pymvpa_datadbroot = \
        cfg.get('datadb', 'root',
                default=pathjoin(os.getcwd(), 'datadb'))


#
# Debugging and optimization
#

if not __debug__:
    try:
        import psyco
        psyco.profile()
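The excerpt above combines two small patterns: read an optional marker file if it exists, and fall back to a default location when a configuration entry is missing. A standard-library sketch of the first pattern (the helper function is hypothetical, not PyMVPA API; only the COMMIT_HASH file name is taken from the code above):

import os

def read_optional_file(path, default=''):
    # Return the stripped contents of path, or default if the file is absent.
    if not os.path.exists(path):
        return default
    with open(path, 'r') as f:
        return f.read().strip()

commit_hash = read_optional_file(
    os.path.join(os.path.dirname(__file__), 'COMMIT_HASH'))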
Code example #5
File: __init__.py Project: psederberg/PyMVPA
# commit hash to be filled in by Git upon export/archive
hashfilename = os.path.join(os.path.dirname(__file__), 'COMMIT_HASH')
__hash__ = ''
if os.path.exists(hashfilename):
    hashfile = open(hashfilename, 'r')
    __hash__ = hashfile.read().strip()
    hashfile.close()

#
# Data paths
#

# locate data root -- data might not be installed, but if it is, it should be at
# this location
pymvpa_dataroot = \
        cfg.get('data', 'root',
                default=os.path.join(os.path.dirname(__file__), 'data'))
# locate PyMVPA data database root -- also might not be installed, but if it is,
# it should be at this location
pymvpa_datadbroot = \
        cfg.get('datadb', 'root',
                default=os.path.join(os.curdir, 'datadb'))


#
# Debugging and optimization
#

if not __debug__:
    try:
        import psyco
        psyco.profile()
Code example #6
__docformat__ = 'restructuredtext'

import numpy as np
import copy

from mvpa2.base import externals, cfg
from mvpa2.base.collections import SampleAttributesCollection, \
        FeatureAttributesCollection, DatasetAttributesCollection
from mvpa2.base.types import is_datasetlike
from mvpa2.base.dochelpers import _str, _strid

if __debug__:
    from mvpa2.base import debug

__REPR_STYLE__ = cfg.get('datasets', 'repr', 'full')

if not __REPR_STYLE__ in ('full', 'str'):
    raise ValueError, "Incorrect value %r for option datasets.repr." \
          " Valid are 'full' and 'str'." % __REPR_STYLE__


class AttrDataset(object):
    """Generic storage class for datasets with multiple attributes.

    A dataset consists of four pieces.  The core is a two-dimensional
    array that has variables (so-called `features`) in its columns and
    the associated observations (so-called `samples`) in the rows.  In
    addition a dataset may have any number of attributes for features
    and samples.  Unsurprisingly, these are called 'feature attributes'
    and 'sample attributes'.  Each attribute is a vector of any datatype
Code example #7
# commit hash to be filled in by Git upon export/archive
hashfilename = os.path.join(os.path.dirname(__file__), 'COMMIT_HASH')
__hash__ = ''
if os.path.exists(hashfilename):
    hashfile = open(hashfilename, 'r')
    __hash__ = hashfile.read().strip()
    hashfile.close()

#
# Data paths
#

# locate data root -- data might not be installed, but if it is, it should be at
# this location
pymvpa_dataroot = \
        cfg.get('data', 'root',
                default=os.path.join(os.path.dirname(__file__), 'data'))
# locate PyMVPA data database root -- also might not be installed, but if it is,
# it should be at this location
pymvpa_datadbroot = \
        cfg.get('datadb', 'root',
                default=os.path.join(os.curdir, 'datadb'))

#
# Debugging and optimization
#

if not __debug__:
    try:
        import psyco
        psyco.profile()
    except ImportError:
Code example #8
File: __init__.py Project: jgors/PyMVPA
# commit hash to be filled in by Git upon export/archive
hashfilename = os.path.join(os.path.dirname(__file__), "COMMIT_HASH")
__hash__ = ""
if os.path.exists(hashfilename):
    hashfile = open(hashfilename, "r")
    __hash__ = hashfile.read().strip()
    hashfile.close()

#
# Data paths
#

# locate data root -- data might not be installed, but if it is, it should be at
# this location
pymvpa_dataroot = cfg.get("data", "root", default=os.path.join(os.path.dirname(__file__), "data"))
# locate PyMVPA data database root -- also might not be installed, but if it is,
# it should be at this location
pymvpa_datadbroot = cfg.get("datadb", "root", default=os.path.join(os.curdir, "datadb"))


#
# Debugging and optimization
#

if not __debug__:
    try:
        import psyco

        psyco.profile()
    except ImportError:
Code example #9
# commit hash to be filled in by Git upon export/archive
hashfilename = pathjoin(os.path.dirname(__file__), 'COMMIT_HASH')
__hash__ = ''
if os.path.exists(hashfilename):
    hashfile = open(hashfilename, 'r')
    __hash__ = hashfile.read().strip()
    hashfile.close()

#
# Data paths
#

# locate data root -- data might not be installed, but if it is, it should be at
# this location
pymvpa_dataroot = \
        cfg.get('data', 'root',
                default=pathjoin(os.path.dirname(__file__), 'data'))
# locate PyMVPA data database root -- also might not be installed, but if it is,
# it should be at this location
pymvpa_datadbroot = \
        cfg.get('datadb', 'root',
                default=pathjoin(os.getcwd(), 'datadb'))

#
# Debugging and optimization
#

if not __debug__:
    try:
        import psyco
        psyco.profile()
    except ImportError:
Code example #10
File: dataset.py Project: neurosbh/PyMVPA
"""Multi-purpose dataset container with support for attributes."""

__docformat__ = "restructuredtext"

import numpy as np
import copy

from mvpa2.base import externals, cfg, warning
from mvpa2.base.collections import SampleAttributesCollection, FeatureAttributesCollection, DatasetAttributesCollection
from mvpa2.base.types import is_datasetlike
from mvpa2.base.dochelpers import _str, _strid

if __debug__:
    from mvpa2.base import debug

__REPR_STYLE__ = cfg.get("datasets", "repr", "full")

if not __REPR_STYLE__ in ("full", "str"):
    raise ValueError, "Incorrect value %r for option datasets.repr." " Valid are 'full' and 'str'." % __REPR_STYLE__


class AttrDataset(object):
    """Generic storage class for datasets with multiple attributes.

    A dataset consists of four pieces.  The core is a two-dimensional
    array that has variables (so-called `features`) in its columns and
    the associated observations (so-called `samples`) in the rows.  In
    addition a dataset may have any number of attributes for features
    and samples.  Unsurprisingly, these are called 'feature attributes'
    and 'sample attributes'.  Each attribute is a vector of any datatype
    that contains a value per each item (feature or sample). Both types
Code example #11
File: warehouse.py Project: Python3pkg/PyMVPA
# BLR
from mvpa2.clfs.blr import BLR
clfswh += RegressionAsClassifier(BLR(descr="BLR()"), descr="BLR Classifier")

#PLR
from mvpa2.clfs.plr import PLR
clfswh += PLR(descr="PLR()")
if externals.exists('scipy'):
    clfswh += PLR(reduced=0.05, descr="PLR(reduced=0.01)")

# SVM stuff

if len(clfswh['linear', 'svm']) > 0:

    linearSVMC = clfswh['linear', 'svm',
                        cfg.get('svm', 'backend', default='libsvm').lower()][0]

    # "Interesting" classifiers
    clfswh += \
         FeatureSelectionClassifier(
             linearSVMC.clone(),
             SensitivityBasedFeatureSelection(
                SMLRWeights(SMLR(lm=0.1, implementation="C"),
                            postproc=maxofabs_sample()),
                RangeElementSelector(mode='select')),
             descr="LinSVM on SMLR(lm=0.1) non-0")

    _rfeclf = linearSVMC.clone()
    clfswh += \
         FeatureSelectionClassifier(
             _rfeclf,
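Classifiers registered in clfswh this way are later looked up by their tags, as the clfswh['linear', 'svm'] indexing above already shows. A hedged usage sketch, assuming the standard mvpa2 train/predict interface, a synthetic dataset from mvpa2.misc.data_generators, and an available linear SVM backend:

from mvpa2.clfs.warehouse import clfswh
from mvpa2.misc.data_generators import normal_feature_dataset

ds = normal_feature_dataset(perlabel=10, nlabels=2, nfeatures=4)
clf = clfswh['linear', 'svm'][0]       # first linear SVM registered in the warehouse
clf.train(ds)
predictions = clf.predict(ds.samples)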
Code example #12
File: warehouse.py Project: schoeke/PyMVPA
from mvpa2.clfs.blr import BLR
clfswh += RegressionAsClassifier(BLR(descr="BLR()"),
                                 descr="BLR Classifier")

#PLR
from mvpa2.clfs.plr import PLR
clfswh += PLR(descr="PLR()")
if externals.exists('scipy'):
    clfswh += PLR(reduced=0.05, descr="PLR(reduced=0.01)")

# SVM stuff

if len(clfswh['linear', 'svm']) > 0:

    linearSVMC = clfswh['linear', 'svm',
                             cfg.get('svm', 'backend', default='libsvm').lower()
                             ][0]

    # "Interesting" classifiers
    clfswh += \
         FeatureSelectionClassifier(
             linearSVMC.clone(),
             SensitivityBasedFeatureSelection(
                SMLRWeights(SMLR(lm=0.1, implementation="C"),
                            postproc=maxofabs_sample()),
                RangeElementSelector(mode='select')),
             descr="LinSVM on SMLR(lm=0.1) non-0")


    clfswh += \
        FeatureSelectionClassifier(
Code example #13
File: warehouse.py Project: psederberg/PyMVPA
from mvpa2.clfs.blr import BLR

clfswh += RegressionAsClassifier(BLR(descr="BLR()"), descr="BLR Classifier")

# PLR
from mvpa2.clfs.plr import PLR

clfswh += PLR(descr="PLR()")
if externals.exists("scipy"):
    clfswh += PLR(reduced=0.05, descr="PLR(reduced=0.01)")

# SVM stuff

if len(clfswh["linear", "svm"]) > 0:

    linearSVMC = clfswh["linear", "svm", cfg.get("svm", "backend", default="libsvm").lower()][0]

    # "Interesting" classifiers
    clfswh += FeatureSelectionClassifier(
        linearSVMC.clone(),
        SensitivityBasedFeatureSelection(
            SMLRWeights(SMLR(lm=0.1, implementation="C"), postproc=maxofabs_sample()),
            RangeElementSelector(mode="select"),
        ),
        descr="LinSVM on SMLR(lm=0.1) non-0",
    )

    clfswh += FeatureSelectionClassifier(
        linearSVMC.clone(),
        SensitivityBasedFeatureSelection(
            SMLRWeights(SMLR(lm=1.0, implementation="C"), postproc=maxofabs_sample()),