Example #1
def test_em_gmm_largedim(verbose=0):
    # testing the GMM model in larger dimensions

    # generate some data
    dim = 10
    x = nr.randn(100, dim)
    x[:30, :] += 1

    # estimate different GMMs of that data
    maxiter = 100
    delta = 1.0e-4

    for k in range(1, 3):
        lgmm = GMM(k, dim)
        lgmm.initialize(x)
        bic = lgmm.estimate(x, maxiter, delta, verbose)
        if verbose:
            print "bic of the %d-classes model" % k, bic

    z = lgmm.map_label(x)

    # define the correct labelling
    u = np.zeros(100)
    u[:30] = 1

    # check the correlation between the true labelling
    # and the computed one
    eta = np.absolute(np.dot(z - z.mean(), u - u.mean()) / (np.std(z) * np.std(u) * 100))
    assert_true(eta > 0.3)
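
The correlation check at the end of this test is just a normalized cross-correlation between the true and estimated binary labels. A minimal numpy-only sketch of that statistic, using made-up labellings rather than GMM output:

import numpy as np

# hypothetical true and estimated binary labellings of 100 samples
u = np.zeros(100)
u[:30] = 1
z = np.zeros(100)
z[:25] = 1  # imperfect recovery of the first block

# same statistic as in the test above: values near 1 mean good agreement
n = u.size
eta = np.abs(np.dot(z - z.mean(), u - u.mean()) / (np.std(z) * np.std(u) * n))
print(eta)  # about 0.88 for these labellings
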
Example #2
def test_model_selection_exact():
    prng = np.random.RandomState(10)
    data, XYZ, XYZvol, vardata, signal = make_data(n=30, dim=20, r=3, 
                amplitude=1, noise=0, jitter=0, prng=prng)
    labels = (signal > 0).astype(int)
    P1 = os.multivariate_stat(data, labels=labels)
    P1.init_hidden_variables()
    P1.evaluate(nsimu=100, burnin=10, verbose=verbose)
    L1 = P1.compute_log_region_likelihood()
    Prior1 = P1.compute_log_prior()
    #v, m_mean, m_var = P1.v.copy(), P1.m_mean.copy(), P1.m_var.copy()
    Post1 = P1.compute_log_posterior(nsimu=1e2, burnin=1e2, verbose=verbose)
    M1 = L1 + Prior1[:-1] - Post1[:-1]
    yield assert_almost_equal(M1.mean(), 
                              P1.compute_marginal_likelihood().mean(), 0)
    P0 = os.multivariate_stat(data, labels=labels)
    P0.network *= 0
    P0.init_hidden_variables()
    P0.evaluate(nsimu=100, burnin=100, verbose=verbose)
    L0 = P0.compute_log_region_likelihood()
    Prior0 = P0.compute_log_prior()
    Post0 = P0.compute_log_posterior(nsimu=1e2, burnin=1e2, 
                                     verbose=verbose)
    M0 = L0 + Prior0[:-1] - Post0[:-1]
    yield assert_almost_equal(M0.mean(), 
                              P0.compute_marginal_likelihood().mean(), 0)
    yield assert_true(M1[1] > M0[1])
    yield assert_true(M1[0] < M0[0])
Example #3
def test_series_from_mask():
    """ Test the smoothing of the timeseries extraction
    """
    # A delta in 3D
    data = np.zeros((40, 40, 40, 2))
    data[20, 20, 20] = 1
    mask = np.ones((40, 40, 40), dtype=bool)
    with InTemporaryDirectory():
        for affine in (np.eye(4), np.diag((1, 1, -1, 1)),
                        np.diag((.5, 1, .5, 1))):
            img = nib.Nifti1Image(data, affine)
            nib.save(img, 'testing.nii')
            series, header = series_from_mask('testing.nii', mask, smooth=9)
            series = np.reshape(series[:, 0], (40, 40, 40))
            vmax = series.max()
            # We are expecting a full-width at half maximum of
            # 9mm/voxel_size:
            above_half_max = series > .5*vmax
            for axis in (0, 1, 2):
                proj = np.any(np.any(np.rollaxis(above_half_max,
                                axis=axis), axis=-1), axis=-1)
                assert_equal(proj.sum(), 9/np.abs(affine[axis, axis]))

        # Check that NaNs in the data do not propagate
        data[10, 10, 10] = np.nan
        img = nib.Nifti1Image(data, affine)
        nib.save(img, 'testing.nii')
        series, header = series_from_mask('testing.nii', mask, smooth=9)
        assert_true(np.all(np.isfinite(series)))
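
The assertion on the projection count relies on the usual relation between a Gaussian's full width at half maximum and its standard deviation, FWHM = 2*sqrt(2*ln 2)*sigma. A small numpy/scipy sketch of that relation in 1D (the 9 mm figure is the one used in the test; the smoothing call is generic scipy, not the series_from_mask machinery):

import numpy as np
from scipy.ndimage import gaussian_filter1d

fwhm_mm = 9.0
voxel_size_mm = 1.0
# convert the requested FWHM into the Gaussian sigma used for smoothing
sigma_vox = fwhm_mm / voxel_size_mm / (2 * np.sqrt(2 * np.log(2)))

# smooth a 1D delta and count samples above half of the maximum
delta = np.zeros(41)
delta[20] = 1.0
smoothed = gaussian_filter1d(delta, sigma_vox)
print(np.sum(smoothed > 0.5 * smoothed.max()))  # about fwhm_mm / voxel_size_mm, i.e. 9
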
Example #4
def test_em_gmm_multi(verbose=0):
    # Playing with various initializations on the same data

    # generate some data
    dim = 2
    x = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(100, dim)))

    # estimate different GMMs of that data
    maxiter = 100
    delta = 1.0e-4
    ninit = 5
    k = 2

    lgmm = GMM(k, dim)
    bgmm = lgmm.initialize_and_estimate(x, maxiter, delta, ninit, verbose)
    bic = bgmm.evidence(x)

    if verbose:
        print "bic of the best model", bic

    if verbose:
        # plot the result
        from test_bgmm import plot2D

        z = lgmm.map_label(x)
        plot2D(x, lgmm, z, show=1, verbose=0)

    assert_true(np.isfinite(bic))
Example #5
def test_select_gmm_old_diag(verbose=0):
    # Computing the BIC value for different configurations

    # generate some data
    dim = 2
    x = np.concatenate((nr.randn(100, 2), 3 + 2 * nr.randn(100, 2)))

    # estimate different GMMs of that data
    k = 2
    prec_type = "diag"

    lgmm = gmm.GMM_old(k, dim, prec_type)
    maxiter = 300
    delta = 0.001
    ninit = 5
    kvals = np.arange(10) + 2

    La, LL, bic = lgmm.optimize_with_bic(x, kvals, maxiter, delta, ninit, verbose)

    if verbose:
        # plot the result
        xmin = 1.1 * x[:, 0].min() - 0.1 * x[:, 0].max()
        xmax = 1.1 * x[:, 0].max() - 0.1 * x[:, 0].min()
        ymin = 1.1 * x[:, 1].min() - 0.1 * x[:, 1].max()
        ymax = 1.1 * x[:, 1].max() - 0.1 * x[:, 1].min()
        gd = gmm.grid_descriptor(2)
        gd.getinfo([xmin, xmax, ymin, ymax], [50, 50])
        gmm.sample(gd, x, verbose=0)

    assert_true(lgmm.k < 5)
Example #6
def test_em_gmm_cv(verbose=0):
    # Comparison of different GMMs using cross-validation

    # generate some data
    dim = 2
    xtrain = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim)))
    xtest = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(1000, dim)))

    # estimate different GMMs for xtrain, and test it on xtest
    prec_type = "full"
    k = 2
    maxiter = 300
    delta = 1.0e-4
    ll = []

    # model 1
    lgmm = GMM(k, dim, prec_type)
    lgmm.initialize(xtrain)
    bic = lgmm.estimate(xtrain, maxiter, delta)
    ll.append(lgmm.test(xtest).mean())

    prec_type = "diag"
    # model 2
    lgmm = GMM(k, dim, prec_type)
    lgmm.initialize(xtrain)
    bic = lgmm.estimate(xtrain, maxiter, delta)
    ll.append(lgmm.test(xtest).mean())

    for k in [1, 3, 10]:
        lgmm = GMM(k, dim, prec_type)
        lgmm.initialize(xtrain)
        bic = lgmm.estimate(xtrain, maxiter, delta)
        ll.append(lgmm.test(xtest).mean())

    assert_true(ll[4] < ll[1])
Example #7
def test_scaling_io_dtype():
    # Does data dtype get set?
    # Is scaling correctly applied?
    rng = np.random.RandomState(19660520) # VBD
    ulp1_f32 = np.finfo(np.float32).eps
    types = (np.uint8, np.uint16, np.int16, np.int32, np.float32)
    with InTemporaryDirectory():
        for in_type in types:
            for out_type in types:
                data, _ = randimg_in2out(rng, in_type, out_type, 'img.nii')
                img = load_image('img.nii')
                # Check the output type is as expected
                hdr = img.metadata['header']
                assert_equal(hdr.get_data_dtype().type, out_type)
                # Check the data is within reasonable bounds. The exact bounds
                # are a little annoying to calculate - see
                # nibabel/tests/test_round_trip for inspiration
                data_back = img.get_data().copy() # copy to detach from file
                del img
                top = np.abs(data - data_back)
                nzs = (top !=0) & (data !=0)
                abs_err = top[nzs]
                if abs_err.size == 0:  # all exact, nothing more to check
                    continue
                rel_err = abs_err / data[nzs]
                if np.dtype(out_type).kind in 'iu':
                    slope, inter = hdr.get_slope_inter()
                    abs_err_thresh = slope / 2.0
                    rel_err_thresh = ulp1_f32
                elif np.dtype(out_type).kind == 'f':
                    abs_err_thresh = big_bad_ulp(data.astype(out_type))[nzs]
                    rel_err_thresh = ulp1_f32
                assert_true(np.all(
                    (abs_err <= abs_err_thresh) |
                    (rel_err <= rel_err_thresh)))
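
The slope / 2 threshold used for the integer output types corresponds to the worst-case rounding error of the scaling scheme: stored integer values reproduce the data only up to half a quantization step. A numpy-only sketch of that bound, with a simplified slope-only scaling (a stand-in, not the actual NIfTI writer logic):

import numpy as np

rng = np.random.RandomState(0)
data = rng.uniform(-1000, 1000, size=10000)

# store float data as int16 via a slope (no intercept, illustrative only)
slope = (data.max() - data.min()) / np.iinfo(np.int16).max
stored = np.round(data / slope).astype(np.int16)
data_back = stored * slope

# the absolute round-trip error never exceeds half a quantization step
print(np.abs(data - data_back).max() <= slope / 2 + 1e-12)  # True
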
Example #8
def test_series_from_mask():
    """ Test the smoothing of the timeseries extraction
    """
    # A delta in 3D
    data = np.zeros((40, 40, 40, 2))
    data[20, 20, 20] = 1
    mask = np.ones((40, 40, 40), dtype=bool)
    with InTemporaryDirectory():
        for affine in (np.eye(4), np.diag(
            (1, 1, -1, 1)), np.diag((.5, 1, .5, 1))):
            img = nib.Nifti1Image(data, affine)
            nib.save(img, 'testing.nii')
            series, header = series_from_mask('testing.nii', mask, smooth=9)
            series = np.reshape(series[:, 0], (40, 40, 40))
            vmax = series.max()
            # We are expecting a full-width at half maximum of
            # 9mm/voxel_size:
            above_half_max = series > .5 * vmax
            for axis in (0, 1, 2):
                proj = np.any(np.any(np.rollaxis(above_half_max, axis=axis),
                                     axis=-1),
                              axis=-1)
                assert_equal(proj.sum(), 9 / np.abs(affine[axis, axis]))

        # Check that NaNs in the data do not propagate
        data[10, 10, 10] = np.nan
        img = nib.Nifti1Image(data, affine)
        nib.save(img, 'testing.nii')
        series, header = series_from_mask('testing.nii', mask, smooth=9)
        assert_true(np.all(np.isfinite(series)))
Example #9
def test_model_selection_mfx_spatial_rand_walk():
    prng = np.random.RandomState(10)
    data, XYZ, XYZvol, vardata, signal = make_data(n=20, 
                                dim=np.array([1,20,20]), 
                                r=3, amplitude=3, noise=1,
                                jitter=0.5, prng=prng)
    labels = (signal > 0).astype(int)
    P = os.multivariate_stat(data, vardata, XYZ, std=0.5, sigma=5, labels=labels)
    P.network[:] = 0
    P.init_hidden_variables()
    P.evaluate(nsimu=100, burnin=100, verbose=verbose, 
                proposal='rand_walk', proposal_std=0.5)
    L00 = P.compute_log_region_likelihood()
    # Test simulated annealing procedure
    P.estimate_displacements_SA(nsimu=100, c=0.99, 
        proposal_std=P.proposal_std, verbose=verbose)
    L0 = P.compute_log_region_likelihood()
    yield assert_true(L0.sum() > L00.sum())
    #Prior0 = P.compute_log_prior()
    #Post0 = P.compute_log_posterior(nsimu=1e2, burnin=1e2, verbose=verbose)
    #M0 = L0 + Prior0[:-1] - Post0[:-1]
    M0 = P.compute_marginal_likelihood(update_spatial=True)
    #yield assert_almost_equal(M0.sum(), P.compute_marginal_likelihood(verbose=verbose).sum(), 0)
    P.network[1] = 1
    #P.init_hidden_variables(init_spatial=False)
    P.init_hidden_variables(init_spatial=False)
    P.evaluate(nsimu=100, burnin=100, verbose=verbose, 
                update_spatial=False, proposal_std=P.proposal_std)
    #L1 = P.compute_log_region_likelihood()
    #Prior1 = P.compute_log_prior()
    #Post1 = P.compute_log_posterior(nsimu=1e2, burnin=1e2, verbose=verbose)
    #M1 = L1 + Prior1[:-1] - Post1[:-1]
    M1 = P.compute_marginal_likelihood(update_spatial=True)
    #yield assert_almost_equal(0.1*M1.sum(), 0.1*P.compute_marginal_likelihood(verbose=verbose).sum(), 0)
    yield assert_true(M1 > M0)
Example #10
def test_em_gmm_diag(verbose=0):
    # Computing the BIC value for GMMs with different numbers of classes,
    # with diagonal covariance models. The BIC should be maximal for a
    # number of classes of 1 or 2

    # generate some data
    dim = 2
    x = np.concatenate((nr.randn(1000, dim), 3 + 2 * nr.randn(1000, dim)))

    # estimate different GMMs of that data
    maxiter = 100
    delta = 1.0e-8
    prec_type = "diag"

    bic = np.zeros(5)
    for k in range(1, 6):
        lgmm = GMM(k, dim, prec_type)
        lgmm.initialize(x)
        bic[k - 1] = lgmm.estimate(x, maxiter, delta, verbose)
        if verbose:
            print "bic of the %d-classes model" % k, bic

    z = lgmm.map_label(x)

    assert_true((z.max() + 1 == lgmm.k) & (bic[4] < bic[1]))
Example #11
def test_scaling_io_dtype():
    # Does data dtype get set?
    # Is scaling correctly applied?
    rng = np.random.RandomState(19660520)  # VBD
    ulp1_f32 = np.finfo(np.float32).eps
    types = (np.uint8, np.uint16, np.int16, np.int32, np.float32)
    with InTemporaryDirectory():
        for in_type in types:
            for out_type in types:
                data, _ = randimg_in2out(rng, in_type, out_type, 'img.nii')
                img = load_image('img.nii')
                # Check the output type is as expected
                hdr = img.metadata['header']
                assert_equal(hdr.get_data_dtype().type, out_type)
                # Check the data is within reasonable bounds. The exact bounds
                # are a little annoying to calculate - see
                # nibabel/tests/test_round_trip for inspiration
                data_back = img.get_data().copy()  # copy to detach from file
                del img
                top = np.abs(data - data_back)
                nzs = (top != 0) & (data != 0)
                abs_err = top[nzs]
                if abs_err.size == 0:  # all exact, nothing more to check
                    continue
                rel_err = abs_err / data[nzs]
                if np.dtype(out_type).kind in 'iu':
                    slope, inter = hdr.get_slope_inter()
                    abs_err_thresh = slope / 2.0
                    rel_err_thresh = ulp1_f32
                elif np.dtype(out_type).kind == 'f':
                    abs_err_thresh = big_bad_ulp(data.astype(out_type))[nzs]
                    rel_err_thresh = ulp1_f32
                assert_true(
                    np.all((abs_err <= abs_err_thresh)
                           | (rel_err <= rel_err_thresh)))
Example #12
def test_as_image():
    # test image creation / pass through function
    img = as_image(funcfile)  # string filename
    img1 = as_image(six.text_type(funcfile))  # unicode
    img2 = as_image(img)
    assert_equal(img.affine, img1.affine)
    assert_array_equal(img.get_data(), img1.get_data())
    assert_true(img is img2)
Example #14
def test_agreement():
    # The test: does Protocol manage to recreate the design of fMRIstat?
    for design_type in ['event', 'block']:
        dd = D[design_type]
        for i in range(X[design_type].shape[1]):
            _, cmax = matchcol(X[design_type][:,i], fmristat[design_type])
            if not dd.dtype.names[i].startswith('ns'):
                assert_true(np.greater(np.abs(cmax), 0.999))
Example #15
def test_agreement():
    # The test: does Protocol manage to recreate the design of fMRIstat?
    X, c, D = create_protocols()
    for design_type in ['event', 'block']:
        dd = D[design_type]
        for i in range(X[design_type].shape[1]):
            _, cmax = matchcol(X[design_type][:, i], fmristat[design_type])
            if not dd.dtype.names[i].startswith('ns'):
                assert_true(np.greater(np.abs(cmax), 0.999))
Example #16
def test_threshold_connect_components():
    a = np.zeros((10, 10))
    a[0, 0] = 1
    a[3, 4] = 1
    a = threshold_connect_components(a, 2)
    assert_true(np.all(a == 0))
    a[0, 0:3] = 1
    b = threshold_connect_components(a, 2)
    assert_true(np.all(a == b))
Example #18
def test_image_list():
    img = load_image(funcfile)
    exp_shape = (17, 21, 3, 20)
    imglst = ImageList.from_image(img, axis=-1)
    
    # Test empty ImageList
    emplst = ImageList()
    yield assert_equal(len(emplst.list), 0)

    # Test non-image construction
    a = np.arange(10)
    yield assert_raises(ValueError, ImageList, a)
    yield assert_raises(ValueError, ImageList.from_image, img, None)

    # check all the axes
    for i in range(4):
        order = list(range(4))
        order.remove(i)
        order.insert(0, i)
        img_re_i = img.reordered_reference(order).reordered_axes(order)
        imglst_i = ImageList.from_image(img, axis=i)

        yield assert_equal(imglst_i.list[0].shape, img_re_i.shape[1:])
        
        # check the affine as well

        yield assert_almost_equal(imglst_i.list[0].affine, 
                                  img_re_i.affine[1:,1:])

    yield assert_equal(img.shape, exp_shape)

    # length of image list should match number of frames
    yield assert_equal(len(imglst.list), img.shape[3])

    # check the affine
    A = np.identity(4)
    A[:3,:3] = img.affine[:3,:3]
    A[:3,-1] = img.affine[:3,-1]
    yield assert_almost_equal(imglst.list[0].affine, A)

    # Slicing an ImageList should return an ImageList
    sublist = imglst[2:5]
    yield assert_true(isinstance(sublist, ImageList))
    # Except when we're indexing one element
    yield assert_true(isinstance(imglst[0], Image))
    # Verify array interface
    # test __array__
    yield assert_true(isinstance(np.asarray(sublist), np.ndarray))
    # Test __setitem__
    sublist[2] = sublist[0]
    yield assert_equal(np.asarray(sublist[0]).mean(),
                       np.asarray(sublist[2]).mean())
    # Test iterator
    for x in sublist:
        yield assert_true(isinstance(x, Image))
        yield assert_equal(x.shape, exp_shape[:3])
Example #19
def test_same_basis():
    arr4d = data['fmridata']
    shp = arr4d.shape
    arr2d =  arr4d.reshape((np.prod(shp[:3]), shp[3]))
    res = pca(arr2d, axis=-1)
    p1b_0 = pos1basis(res)
    for i in range(3):
        res_again = pca(arr2d, axis=-1)
        assert_true(np.all(pos1basis(res_again) ==
                           p1b_0))
Example #20
def test_kernel():
    # Verify that convolution with a delta function gives the correct
    # answer.
    tol = 0.9999
    sdtol = 1.0e-8
    for x in range(6):
        shape = randint(30,60,(3,))
        # pos of delta
        ii, jj, kk = randint(11,17, (3,))
        # random affine coordmap (diagonal and translations)
        coordmap = AffineTransform.from_start_step('ijk', 'xyz', 
                                          randint(5,20,(3,))*0.25,
                                          randint(5,10,(3,))*0.5)
        # delta function in 3D array
        signal = np.zeros(shape)
        signal[ii,jj,kk] = 1.
        signal = Image(signal, coordmap=coordmap)
        # A filter with coordmap, shape matched to image
        kernel = LinearFilter(coordmap, shape, 
                              fwhm=randint(50,100)/10.)
        # smoothed normalized 3D array
        ssignal = kernel.smooth(signal).get_data()
        ssignal[:] *= kernel.norms[kernel.normalization]
        # 3 points * signal.size array
        I = np.indices(ssignal.shape)
        I.shape = (kernel.coordmap.ndims[0], np.prod(shape))
        # location of maximum in smoothed array
        i, j, k = I[:, np.argmax(ssignal[:].flat)]
        # same place as we put it before smoothing?
        assert_equal((i,j,k), (ii,jj,kk))
        # get physical points position relative to position of delta
        Z = kernel.coordmap(I.T) - kernel.coordmap([i,j,k])
        _k = kernel(Z)
        _k.shape = ssignal.shape
        assert_true((np.corrcoef(_k[:].flat, ssignal[:].flat)[0,1] > tol))
        assert_true(((_k[:] - ssignal[:]).std() < sdtol))

        def _indices(i,j,k,axis):
            I = np.zeros((3,20))
            I[0] += i
            I[1] += j
            I[2] += k
            I[axis] += np.arange(-10,10)
            return I.T

        vx = ssignal[i,j,(k-10):(k+10)]
        xformed_ijk = coordmap([i, j, k])
        vvx = coordmap(_indices(i,j,k,2)) - xformed_ijk
        assert_true((np.corrcoef(vx, kernel(vvx))[0,1] > tol))
        vy = ssignal[i,(j-10):(j+10),k]
        vvy = coordmap(_indices(i,j,k,1)) - xformed_ijk
        assert_true((np.corrcoef(vy, kernel(vvy))[0,1] > tol))
        vz = ssignal[(i-10):(i+10),j,k]
        vvz = coordmap(_indices(i,j,k,0)) - xformed_ijk
        assert_true((np.corrcoef(vz, kernel(vvz))[0,1] > tol))
Example #21
def test_em_selection():
    # test that the basic GMM-based model selection tool returns
    # something sensible (i.e. the gmm used to represent the data has
    # indeed one or two classes)

    # generate some data
    dim = 2
    x = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim)))

    krange = range(1, 10)
    lgmm = gmm.best_fitting_GMM(x, krange, prec_type="full", niter=100, delta=1.0e-4, ninit=1, verbose=0)
    assert_true(lgmm.k < 4)
Example #22
def test_em_loglike1():
    dim = 1
    k = 3
    n = 1000
    x = nr.randn(n, dim)
    lgmm = GMM(k, dim)
    lgmm.initialize(x)
    lgmm.estimate(x)
    ll = lgmm.average_log_like(x)
    ent = 0.5 * (1 + np.log(2 * np.pi))
    print(ll, ent)
    assert_true(np.absolute(ll + ent) < 3.0 / np.sqrt(n))
Example #23
def test_is_image():
    # tests for the is_image check
    img = load_image(anatfile)
    yield assert_true(is_image(img))
    class C(object): pass
    yield assert_false(is_image(C()))
    class C(object):
        def __array__(self): pass
    yield assert_false(is_image(C()))
    class C(object):
        coordmap = None
        def __array__(self): pass
    yield assert_true(is_image(C()))
Example #24
def test_em_loglike4():
    dim = 5
    k = 1
    n = 1000
    scale = 3.0
    offset = 4.0
    x = offset + scale * nr.randn(n, dim)
    lgmm = GMM(k, dim)
    lgmm.initialize(x)
    lgmm.estimate(x)
    ll = lgmm.average_log_like(x)
    ent = dim * 0.5 * (1 + np.log(2 * np.pi * scale ** 2))
    print(ll, ent)
    assert_true(np.absolute(ll + ent) < dim * 3.0 / np.sqrt(n))
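
The reference value here is the differential entropy of the generating Gaussian: for N(0, scale**2 * I) in dim dimensions the expected log-density of a sample is -dim/2 * (1 + log(2*pi*scale**2)). A quick numpy check of that identity, independent of the GMM class:

import numpy as np

dim, n, scale = 5, 100000, 3.0
rng = np.random.RandomState(0)
x = scale * rng.randn(n, dim)

# exact average log-density of N(0, scale**2 * I) over its own samples
ll = np.mean(-0.5 * np.sum(x ** 2, axis=1) / scale ** 2
             - 0.5 * dim * np.log(2 * np.pi * scale ** 2))
ent = dim * 0.5 * (1 + np.log(2 * np.pi * scale ** 2))
print(ll, -ent)  # the two agree up to sampling error
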
Example #25
def test_as_image():
    # test image creation / pass through function
    img = as_image(funcfile) # string filename
    img1 = as_image(str(funcfile))  # unicode
    img2 = as_image(img)
    yield assert_equal(img.affine, img1.affine)
    yield assert_array_equal(np.asarray(img), np.asarray(img1))
    yield assert_true(img is img2)
Example #26
def test_em_loglike6():
    """
    """
    dim = 1
    k = 1
    n = 100
    offset = 3.0
    x = nr.randn(n, dim)
    y = offset + nr.randn(n, dim)
    lgmm = GMM(k, dim)
    lgmm.initialize(x)
    lgmm.estimate(x)
    ll1 = lgmm.average_log_like(x)
    ll2 = lgmm.average_log_like(y)
    ent = 0.5 * (1 + np.log(2 * np.pi))
    dkl = 0.5 * offset ** 2
    print(ll2, ll1, dkl)
    assert_true(ll2 < ll1)
Example #27
def test_resid():
    # Data is projected onto k=10 dimensional subspace then has its mean
    # removed.  Should still have rank 10.
    k = 10
    ncomp = 5
    ntotal = k
    X = np.random.standard_normal((data['nimages'], k))
    p = pca(data['fmridata'], -1, ncomp=ncomp, design_resid=X)
    assert_equal(p['basis_vectors'].shape, (data['nimages'], ntotal))
    assert_equal(p['basis_projections'].shape, data['mask'].shape + (ncomp,))
    assert_equal(p['pcnt_var'].shape, (ntotal,))
    assert_almost_equal(p['pcnt_var'].sum(), 100.)
    # if design_resid is None, we do not remove the mean, and we get
    # full rank from our data
    p = pca(data['fmridata'], -1, design_resid=None)
    rank = p['basis_vectors'].shape[1]
    assert_equal(rank, data['nimages'])
    rarr = reconstruct(p['basis_vectors'], p['basis_projections'], -1)
    # add back the sqrt MSE, because we standardized
    rmse = root_mse(data['fmridata'], axis=-1)[...,None]
    assert_true(np.allclose(rarr * rmse, data['fmridata']))
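
The reconstruction round trip in this test can be mimicked with plain numpy SVD; the sketch below only illustrates the underlying linear algebra (center, factor, rebuild), not the nipy pca/reconstruct API:

import numpy as np

rng = np.random.RandomState(0)
data = rng.uniform(size=(100, 20))

# center, factor with SVD, then rebuild and add the mean back
data_mean = data.mean(axis=0)
U, s, Vt = np.linalg.svd(data - data_mean, full_matrices=False)
rebuilt = np.dot(U * s, Vt) + data_mean
print(np.allclose(rebuilt, data))  # True
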
Example #28
def test_mask():
    mean_image = np.ones((9, 9))
    mean_image[3:-3, 3:-3] = 10
    mean_image[5, 5] = 100
    mask1 = nnm.compute_mask(mean_image)
    mask2 = nnm.compute_mask(mean_image, exclude_zeros=True)
    # With an array with no zeros, exclude_zeros should not make
    # any difference
    assert_array_equal(mask1, mask2)
    # Check that padding with zeros does not change the extracted mask
    mean_image2 = np.zeros((30, 30))
    mean_image2[:9, :9] = mean_image
    mask3 = nnm.compute_mask(mean_image2, exclude_zeros=True)
    assert_array_equal(mask1, mask3[:9, :9])
    # However, without exclude_zeros, it does
    mask3 = nnm.compute_mask(mean_image2)
    assert_false(np.allclose(mask1, mask3[:9, :9]))
    # check that  opening is 2 by default
    mask4 = nnm.compute_mask(mean_image, exclude_zeros=True, opening=2)
    assert_array_equal(mask1, mask4)
    # check that opening has an effect
    mask5 = nnm.compute_mask(mean_image, exclude_zeros=True, opening=0)
    assert_true(mask5.sum() > mask4.sum())
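
compute_mask broadly thresholds the mean image and then applies a morphological opening to remove isolated voxels. The rough numpy/scipy sketch below illustrates only that threshold-plus-opening idea, not the actual nnm.compute_mask algorithm or its histogram-based threshold choice:

import numpy as np
from scipy.ndimage import binary_opening

mean_image = np.zeros((30, 30))
mean_image[10:20, 10:20] = 10  # a compact foreground region
mean_image[2, 2] = 10          # an isolated bright voxel

mask = mean_image > 5
opened = binary_opening(mask, iterations=2)
# opening drops the isolated voxel but keeps the bulk of the compact region
print(mask.sum(), opened.sum())
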
Example #30
def test_em_gmm_full(verbose=0):
    # Computing the BIC value for different configurations of a GMM with
    # full covariance matrices. The BIC should be maximal for a number of
    # classes of 1 or 2

    # generate some data
    dim = 2
    x = np.concatenate((nr.randn(100, dim), 3 + 2 * nr.randn(100, dim)))

    # estimate different GMMs of that data
    maxiter = 100
    delta = 1.0e-4

    bic = np.zeros(5)
    for k in range(1, 6):
        lgmm = GMM(k, dim)
        lgmm.initialize(x)
        bic[k - 1] = lgmm.estimate(x, maxiter, delta, verbose)
        if verbose:
            print "bic of the %d-classes model" % k, bic

    z = lgmm.map_label(x)
    assert_true(bic[4] < bic[1])
Example #31
def test_2D():
    # check that a standard 2D PCA works too
    M = 100
    N = 20
    L = M-1 # rank after mean removal
    data = np.random.uniform(size=(M, N))
    p = pca(data)
    ts = p['basis_vectors']
    imgs = p['basis_projections']
    assert_equal(ts.shape, (M, L))
    assert_equal(imgs.shape, (L, N))
    rimgs = reconstruct(ts, imgs)
    # add back the sqrt MSE, because we standardized
    data_mean = data.mean(0)[None,...]
    demeaned = data - data_mean
    rmse = root_mse(demeaned, axis=0)[None,...]
    # also add back the mean
    assert_array_almost_equal((rimgs * rmse) + data_mean, data)
    # if standardize is set, or not, covariance is diagonal
    assert_true(diagonal_covariance(imgs))
    p = pca(data, standardize=False)
    imgs = p['basis_projections']
    assert_true(diagonal_covariance(imgs))
Example #32
def test_2D():
    # check that a standard 2D PCA works too
    M = 100
    N = 20
    L = M-1 # rank after mean removal
    data = np.random.uniform(size=(M, N))
    p = pca(data)
    ts = p['basis_vectors']
    imgs = p['basis_projections']
    yield assert_equal(ts.shape, (M, L))
    yield assert_equal(imgs.shape, (L, N))
    rimgs = reconstruct(ts, imgs)
    # add back the sqrt MSE, because we standardized
    data_mean = data.mean(0)[None,...]
    demeaned = data - data_mean
    rmse = root_mse(demeaned, axis=0)[None,...]
    # also add back the mean
    yield assert_array_almost_equal((rimgs * rmse) + data_mean, data)
    # if standardize is set, or not, covariance is diagonal
    yield assert_true(diagonal_covariance(imgs))
    p = pca(data, standardize=False)
    imgs = p['basis_projections']
    yield assert_true(diagonal_covariance(imgs))
Example #33
def test_call():
    value = 10
    yield assert_true(np.allclose(E.a(value), 2*value))
    yield assert_true(np.allclose(E.b(value), 2*value))
    # FIXME: this shape just below is not
    # really expected for a CoordinateMap
    yield assert_true(np.allclose(E.b([value]), 2*value))
    yield assert_true(np.allclose(E.c(value), value/2))
    yield assert_true(np.allclose(E.d(value), value/2))
    value = np.array([1., 2., 3.])
    yield assert_true(np.allclose(E.e(value), value))
    # check that error raised for wrong shape
    value = np.array([1., 2.,])
    yield assert_raises(CoordinateSystemError, E.e, value)
Example #34
def test_inverse2():
    assert_true(np.allclose(E.e.affine, E.e.inverse().inverse().affine))
Example #35
def test_compose_cmap():
    value = np.array([1., 2., 3.])
    b = compose(E.e, E.e)
    assert_true(np.allclose(b(value), value))
Example #36
def test_diagonality():
    # basis_projections are diagonal, whether standardized or not
    p = pca(data['fmridata'], -1) # standardized
    assert_true(diagonal_covariance(p['basis_projections'], -1))
    pns = pca(data['fmridata'], -1, standardize=False)  # not standardized
    assert_true(diagonal_covariance(pns['basis_projections'], -1))
Example #37
def test_diagonality():
    # basis_projections are diagonal, whether standardized or not
    p = pca(data['fmridata'], -1) # standardized
    yield assert_true(diagonal_covariance(p['basis_projections'], -1))
    pns = pca(data['fmridata'], -1, standardize=False)  # not standardized
    yield assert_true(diagonal_covariance(pns['basis_projections'], -1))
Example #38
def test_design_expression():
    t1 = F.Term("x")
    t2 = F.Term('y')
    f = t1.formula + t2.formula
    assert_true(str(f.design_expr) in ['[x, y]', '[y, x]'])