Code Example #1
File: projections.py  Project: B-Rich/PyMVPA
import pylab as pl

# Fragment from projections.py: `datasets`, `mappers`, `center`,
# `plot_proj_dir` and `cfg` are defined earlier in the full script.
ndatasets = len(datasets)
nmappers = len(mappers)

pl.figure(figsize=(8,8))
fig = 1

for ds in datasets:
    for mname, mapper in mappers.items():
        mapper.train(ds)

        # project the samples and grab the learned projection matrix
        dproj = mapper.forward(ds.samples)
        mproj = mapper.proj
        pl.subplot(ndatasets, nmappers, fig)
        if fig <= 3:
            pl.title(mname)
        pl.axis('equal')

        pl.scatter(ds.samples[:, 0] - center[0],
                   ds.samples[:, 1] - center[1],
                   s=30, c=(ds.sa.targets) * 200)
        plot_proj_dir(mproj)
        fig += 1


if cfg.getboolean('examples', 'interactive', True):
    pl.show()
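# `plot_proj_dir` is defined earlier in projections.py. A hypothetical
# minimal version (an assumption, not the project's code) would draw each
# column of the projection matrix as a direction from the origin:
#
#   def plot_proj_dir(proj):
#       for v in proj.T:
#           pl.plot([0, v[0]], [0, v[1]], linewidth=2)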

"""
Output of the example:
Code Example #2
"""
With these parameters we can compute a high-resolution curve for the estimated
time course and plot it together with the "true" time course and the data:
"""

import numpy as np

x = np.linspace(0, 20)
curves = [(x, single_gamma_hrf(x, 6, 7, 1)),
          (x, single_gamma_hrf(x, *fpar))]

# plot data (with error bars) and both curves
plot_err_line(a, curves=curves, linestyle='-')

# add legend to plot
pl.legend(('original', 'fit'))
pl.title('True and estimated BOLD response')

"""

.. image:: ../pics/ex_curvefitting_bold.*
   :align: center
   :alt: BOLD response fitting example


Searchlight accuracy distributions
----------------------------------

When doing a searchlight analysis one might have the idea that the
resulting accuracies are actually sampled from two distributions: one
caused by an actual signal source and the other being the chance
distribution.  Let's assume that these two distributions can be
approximated by a Gaussian,
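A minimal illustrative sketch of this idea (synthetic data and made-up
mixture parameters, not the original example's code): pool accuracies drawn
from a chance Gaussian and a signal Gaussian, then overlay both weighted
densities on the histogram::

    import numpy as np
    import pylab as pl
    from scipy.stats import norm

    rng = np.random.RandomState(0)
    # assumed mixture: 90% chance-level accuracies, 10% carrying signal
    acc = np.concatenate([rng.normal(0.5, 0.05, 900),
                          rng.normal(0.7, 0.05, 100)])

    pl.hist(acc, bins=50, density=True)
    x = np.linspace(0.3, 0.9, 200)
    pl.plot(x, 0.9 * norm.pdf(x, 0.5, 0.05), label='chance')
    pl.plot(x, 0.1 * norm.pdf(x, 0.7, 0.05), label='signal')
    pl.legend()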
Code Example #3
    result = kernel.compute(data)

# In the following we draw some 2D functions at random from the
# distribution N(0, kernel) defined by each available kernel and
# plot them. These plots show the flexibility of a given kernel
# (with default parameters) when doing interpolation. The choice
# of a kernel defines a prior probability over the function space
# used for regression/classification with GPR/GPC.
count = 1
for k in kernel_dictionary:
    pl.subplot(3, 4, count)
    # X = np.random.rand(size)*12.0-6.0
    # X.sort()
    X = np.arange(-1, 1, .02)
    X = X[:, np.newaxis]
    ker = kernel_dictionary[k]()
    ker.compute(X, X)
    print(k)
    K = np.asarray(ker)
    for i in range(10):
        f = np.random.multivariate_normal(np.zeros(X.shape[0]), K)
        pl.plot(X[:, 0], f, "b-")

    pl.title(k)
    pl.axis('tight')
    count += 1

if cfg.getboolean('examples', 'interactive', True):
    # show all the cool figures
    pl.show()