Example 1
def test_estimate_varatio(p=1.0e-04, sigma2=1):
    # This is a random test, but it is designed to fail only rarely...
    ntrial = 300
    n = 10
    random = np.zeros(10)
    rsd = np.zeros(n)
    sd = np.multiply.outer(np.linspace(0, 1, 40), np.ones(ntrial)) + np.ones(
        (40, ntrial))

    for i in range(n):
        Y = np.random.standard_normal((40, ntrial)) * np.sqrt((sd**2 + sigma2))
        results = onesample.estimate_varatio(Y, sd)
        random[i] = results['random'].mean()
        rsd[i] = results['random'].std()

    # Compute the mean just to be sure it works

    W = 1. / (sd**2 + results['random'])
    mu = onesample.estimate_mean(Y,
                                 np.sqrt(sd**2 + results['random']))['effect']
    yield assert_almost_equal, mu, (W * Y).sum(0) / W.sum(0)

    rsd = np.sqrt((rsd**2).mean() / ntrial)
    T = np.fabs((random.mean() - sigma2) / (rsd / np.sqrt(n)))

    # should fail one in every 1/p trials at least for sigma > 0,
    # small values of sigma seem to have some bias
    if T > norm.ppf(1 - p / 2):
        raise ValueError('large T value, but algorithm works, '
                         'could be a statistical failure')
Example 2
def test_estimate_varatio(p=1.0e-04, sigma2=1):
    # This is a random test, but it is designed to fail only rarely...
    ntrial = 300
    n = 10
    random = np.zeros(10)
    rsd = np.zeros(n)
    sd = np.multiply.outer(
        np.linspace(0,1,40),
        np.ones(ntrial)
        ) + np.ones((40,ntrial)) 

    for i in range(n):
        Y = np.random.standard_normal((40,ntrial)) * np.sqrt((sd**2 + sigma2))
        results = onesample.estimate_varatio(Y, sd)
        random[i] = results['random'].mean()
        rsd[i] = results['random'].std()

    # Compute the mean just to be sure it works

    W = 1. / (sd**2 + results['random'])
    mu = onesample.estimate_mean(Y, np.sqrt(sd**2 + results['random']))['effect']
    yield assert_almost_equal, mu, (W*Y).sum(0) / W.sum(0)

    rsd = np.sqrt((rsd**2).mean() / ntrial)
    T = np.fabs((random.mean() - sigma2) / (rsd / np.sqrt(n)))
    
    # should fail one in every 1/p trials at least for sigma > 0,
    # small values of sigma seem to have some bias
    if T > norm.ppf(1-p/2):
        raise ValueError('large T value, but algorithm works, '
                         'could be a statistical failure')
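
The acceptance threshold in both versions of the test is the two-sided normal critical value for the chosen p. A standalone check of that bound, using only SciPy:

from scipy.stats import norm

p = 1.0e-04
crit = norm.ppf(1 - p / 2)   # two-sided critical value; roughly 3.89 for p = 1e-4
print(crit)
# Under the null, |T| exceeds this threshold with probability about p, which is
# why the test treats a larger value as a rare statistical failure.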
Example 3
def group_analysis(design, contrast):
    """ Compute group analysis effect, t, sd for `design` and `contrast`

    Saves to disk in 'group' analysis directory

    Parameters
    ----------
    design : {'block', 'event'}
    contrast : str
        contrast name
    """
    array = np.array # shorthand
    # Directory where output will be written
    odir = futil.ensure_dir(futil.DATADIR, 'group', design, contrast)

    # Which subjects have this (contrast, design) pair?
    subj_con_dirs = futil.subj_des_con_dirs(design, contrast)
    if len(subj_con_dirs) == 0:
        raise ValueError('No subjects for %s, %s' % (design, contrast))

    # Assemble effects and sds into 4D arrays
    sds = []
    Ys = []
    for s in subj_con_dirs:
        sd_img = load_image(pjoin(s, "sd.nii"))
        effect_img = load_image(pjoin(s, "effect.nii"))
        sds.append(sd_img.get_data())
        Ys.append(effect_img.get_data())
    sd = array(sds)
    Y = array(Ys)

    # This function estimates the ratio of the fixed effects variance
    # (sum(1/sd**2, 0)) to the estimated random effects variance
    # (sum(1/(sd+rvar)**2, 0)) where rvar is the random effects variance.

    # The EM algorithm used is described in:
    #
    # Worsley, K.J., Liao, C., Aston, J., Petre, V., Duncan, G.H.,
    #    Morales, F., Evans, A.C. (2002). 'A general statistical
    #    analysis for fMRI data'. NeuroImage, 15:1-15
    varest = onesample.estimate_varatio(Y, sd)
    random_var = varest['random']

    # XXX - if we have a smoother, use
    # random_var = varest['fixed'] * smooth(varest['ratio'])

    # Having estimated the random effects variance (and possibly smoothed it),
    # the corresponding estimate of the effect and its variance is computed and
    # saved.

    # This is the coordmap we will use
    coordmap = futil.load_image_fiac("fiac_00", "wanatomical.nii").coordmap

    adjusted_var = sd**2 + random_var
    adjusted_sd = np.sqrt(adjusted_var)

    results = onesample.estimate_mean(Y, adjusted_sd) 
    for n in ['effect', 'sd', 't']:
        im = api.Image(results[n], copy(coordmap))
        save_image(im, pjoin(odir, "%s.nii" % n))
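
A hypothetical driver for this function, assuming the directory layout and futil helpers used above are in place; the contrast names below are placeholders rather than values taken from the example:

# Hypothetical usage sketch; the design values come from the docstring above,
# the contrast names are placeholders.
for design in ('block', 'event'):
    for contrast in ('some_contrast', 'another_contrast'):
        group_analysis(design, contrast)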
Example 4
def group_analysis(design, contrast):
    """ Compute group analysis effect, t, sd for `design` and `contrast`

    Saves to disk in 'group' analysis directory

    Parameters
    ----------
    design : {'block', 'event'}
    contrast : str
        contrast name
    """
    array = np.array  # shorthand
    # Directory where output will be written
    odir = futil.ensure_dir(futil.DATADIR, 'group', design, contrast)

    # Which subjects have this (contrast, design) pair?
    subj_con_dirs = futil.subj_des_con_dirs(design, contrast)
    if len(subj_con_dirs) == 0:
        raise ValueError('No subjects for %s, %s' % (design, contrast))

    # Assemble effects and sds into 4D arrays
    sds = []
    Ys = []
    for s in subj_con_dirs:
        sd_img = load_image(pjoin(s, "sd.nii"))
        effect_img = load_image(pjoin(s, "effect.nii"))
        sds.append(sd_img.get_data())
        Ys.append(effect_img.get_data())
    sd = array(sds)
    Y = array(Ys)

    # This function estimates the ratio of the fixed effects variance
    # (sum(1/sd**2, 0)) to the estimated random effects variance
    # (sum(1/(sd+rvar)**2, 0)) where rvar is the random effects variance.

    # The EM algorithm used is described in:
    #
    # Worsley, K.J., Liao, C., Aston, J., Petre, V., Duncan, G.H.,
    #    Morales, F., Evans, A.C. (2002). 'A general statistical
    #    analysis for fMRI data'. NeuroImage, 15:1-15
    varest = onesample.estimate_varatio(Y, sd)
    random_var = varest['random']

    # XXX - if we have a smoother, use
    # random_var = varest['fixed'] * smooth(varest['ratio'])

    # Having estimated the random effects variance (and possibly smoothed it),
    # the corresponding estimate of the effect and its variance is computed and
    # saved.

    # This is the coordmap we will use
    coordmap = futil.load_image_ds105("fiac_00", "wanatomical.nii").coordmap

    adjusted_var = sd**2 + random_var
    adjusted_sd = np.sqrt(adjusted_var)

    results = onesample.estimate_mean(Y, adjusted_sd)
    for n in ['effect', 'sd', 't']:
        im = api.Image(results[n], copy(coordmap))
        save_image(im, pjoin(odir, "%s.nii" % n))
Example 5
def group_analysis_signs(design, contrast, mask, signs=None):
    """
    This function refits the EM model with a vector of signs.
    Used in the permutation tests.

    Returns the minimum and maximum of the T-statistic within mask

    Parameters
    ----------

    design: one of 'block', 'event'

    contrast: str

    mask: array-like

    signs: ndarray, optional
         Defaults to np.ones. Should have shape (*,nsubj)
         where nsubj is the number of effects combined in the group analysis.

    Returns
    -------

    minT: np.ndarray, minima of T statistic within mask, one for each
         vector of signs

    maxT: np.ndarray, maxima of T statistic within mask, one for each
         vector of signs
    
    """

    maska = np.asarray(mask).astype(bool)

    # Which subjects have this (contrast, design) pair?

    subjects = futil.subject_dirs(design, contrast)

    sd = np.array([np.array(load_image(pjoin(s, "sd.nii")))[:,maska]
                   for s in subjects])
    Y = np.array([np.array(load_image(pjoin(s, "effect.nii")))[:,maska]
                  for s in subjects])

    if signs is None:
        signs = np.ones((1, Y.shape[0]))

    maxT = np.empty(signs.shape[0])
    minT = np.empty(signs.shape[0])

    for i, sign in enumerate(signs):
        signY = sign[:,np.newaxis] * Y
        varest = onesample.estimate_varatio(signY, sd)
        random_var = varest['random']

        adjusted_var = sd**2 + random_var
        adjusted_sd = np.sqrt(adjusted_var)

        results = onesample.estimate_mean(Y, adjusted_sd) 
        T = results['t']
        minT[i], maxT[i] = np.nanmin(T), np.nanmax(T)
    return minT, maxT
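
A sketch of how the signs argument for this permutation test could be built: one row of +/-1 values per permutation, one column per subject. The sizes are illustrative, not taken from the data:

import numpy as np

# nsubj must match Y.shape[0] inside group_analysis_signs; both values here
# are illustrative.
nsubj, nperm = 16, 1000
rng = np.random.default_rng(42)
signs = rng.choice([-1.0, 1.0], size=(nperm, nsubj))  # shape (nperm, nsubj)

# One would then call, with design, contrast and mask as in the example:
#   minT, maxT = group_analysis_signs(design, contrast, mask, signs)
# and compare the observed extrema of T against the permutation distributions.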
Example 6
def group_analysis(design, contrast):
    """
    Compute group analysis effect, sd and t
    for a given contrast and design type
    """
    array = np.array # shorthand
    # Directory where output will be written
    odir = futil.ensure_dir(futil.DATADIR, 'group', design, contrast)

    # Which subjects have this (contrast, design) pair?
    subjects = futil.subject_dirs(design, contrast)

    sd = array([array(load_image(pjoin(s, "sd.nii"))) for s in subjects])
    Y = array([array(load_image(pjoin(s, "effect.nii"))) for s in subjects])

    # This function estimates the ratio of the
    # fixed effects variance (sum(1/sd**2, 0))
    # to the estimated random effects variance
    # (sum(1/(sd+rvar)**2, 0)) where
    # rvar is the random effects variance.

    # The EM algorithm used is described in 
    #
    # Worsley, K.J., Liao, C., Aston, J., Petre, V., Duncan, G.H.,
    #    Morales, F., Evans, A.C. (2002). 'A general statistical
    #    analysis for fMRI data'. NeuroImage, 15:1-15

    varest = onesample.estimate_varatio(Y, sd)
    random_var = varest['random']

    # XXX - if we have a smoother, use
    # random_var = varest['fixed'] * smooth(varest['ratio'])

    # Having estimated the random effects variance (and
    # possibly smoothed it), the corresponding
    # estimate of the effect and its variance is
    # computed and saved.

    # This is the coordmap we will use
    coordmap = futil.load_image_fiac("fiac_00", "wanatomical.nii").coordmap

    adjusted_var = sd**2 + random_var
    adjusted_sd = np.sqrt(adjusted_var)

    results = onesample.estimate_mean(Y, adjusted_sd) 
    for n in ['effect', 'sd', 't']:
        im = api.Image(results[n], coordmap.copy())
        save_image(im, pjoin(odir, "%s.nii" % n))
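
The random-effects adjustment used throughout these examples adds the estimated random-effects variance to each subject's squared sd before forming precision weights. A toy NumPy illustration of how that flattens the weights across subjects:

import numpy as np

sd = np.array([0.5, 1.0, 2.0])   # per-subject standard deviations (toy values)
random_var = 0.8                 # stands in for varest['random'] at one voxel

fixed_w = 1.0 / sd**2                   # fixed-effects precision weights
mixed_w = 1.0 / (sd**2 + random_var)    # weights after the adjustment above

print(fixed_w / fixed_w.sum())   # strongly favours the low-sd subject
print(mixed_w / mixed_w.sum())   # flatter: between-subject variance dominates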
Example 7
def group_analysis_signs(design, contrast, mask, signs=None):
    """ Refit the EM model with a vector of signs.

    Used in the permutation tests.

    Returns the minimum and maximum of the T-statistic within mask

    Parameters
    ----------
    design: one of 'block', 'event'
    contrast: str
        name of contrast to estimate
    mask : ``Image`` instance or array-like
        image containing mask, or array-like
    signs: ndarray, optional
         Defaults to np.ones. Should have shape (*,nsubj)
         where nsubj is the number of effects combined in the group analysis.

    Returns
    -------
    minT: np.ndarray, minima of T statistic within mask, one for each
         vector of signs
    maxT: np.ndarray, maxima of T statistic within mask, one for each
         vector of signs
    """
    if api.is_image(mask):
        maska = mask.get_data()
    else:
        maska = np.asarray(mask)
    maska = maska.astype(bool)

    # Which subjects have this (contrast, design) pair?
    subj_con_dirs = futil.subj_des_con_dirs(design, contrast)

    # Assemble effects and sds into 4D arrays
    sds = []
    Ys = []
    for s in subj_con_dirs:
        sd_img = load_image(pjoin(s, "sd.nii"))
        effect_img = load_image(pjoin(s, "effect.nii"))
        sds.append(sd_img.get_data()[maska])
        Ys.append(effect_img.get_data()[maska])
    sd = np.array(sds)
    Y = np.array(Ys)

    if signs is None:
        signs = np.ones((1, Y.shape[0]))

    maxT = np.empty(signs.shape[0])
    minT = np.empty(signs.shape[0])

    for i, sign in enumerate(signs):
        signY = sign[:,np.newaxis] * Y
        varest = onesample.estimate_varatio(signY, sd)
        random_var = varest['random']

        adjusted_var = sd**2 + random_var
        adjusted_sd = np.sqrt(adjusted_var)

        results = onesample.estimate_mean(Y, adjusted_sd) 
        T = results['t']
        minT[i], maxT[i] = np.nanmin(T), np.nanmax(T)
    return minT, maxT
Example 8
def group_analysis_signs(design, contrast, mask, signs=None):
    """ Refit the EM model with a vector of signs.

    Used in the permutation tests.

    Returns the minimum and maximum of the T-statistic within mask

    Parameters
    ----------
    design: one of 'block', 'event'
    contrast: str
        name of contrast to estimate
    mask : ``Image`` instance or array-like
        image containing mask, or array-like
    signs: ndarray, optional
         Defaults to np.ones. Should have shape (*,nsubj)
         where nsubj is the number of effects combined in the group analysis.

    Returns
    -------
    minT: np.ndarray, minima of T statistic within mask, one for each
         vector of signs
    maxT: np.ndarray, maxima of T statistic within mask, one for each
         vector of signs
    """
    if api.is_image(mask):
        maska = mask.get_data()
    else:
        maska = np.asarray(mask)
    maska = maska.astype(bool)

    # Which subjects have this (contrast, design) pair?
    subj_con_dirs = futil.subj_des_con_dirs(design, contrast)

    # Assemble effects and sds into 4D arrays
    sds = []
    Ys = []
    for s in subj_con_dirs:
        sd_img = load_image(pjoin(s, "sd.nii"))
        effect_img = load_image(pjoin(s, "effect.nii"))
        sds.append(sd_img.get_data()[maska])
        Ys.append(effect_img.get_data()[maska])
    sd = np.array(sds)
    Y = np.array(Ys)

    if signs is None:
        signs = np.ones((1, Y.shape[0]))

    maxT = np.empty(signs.shape[0])
    minT = np.empty(signs.shape[0])

    for i, sign in enumerate(signs):
        signY = sign[:, np.newaxis] * Y
        varest = onesample.estimate_varatio(signY, sd)
        random_var = varest['random']

        adjusted_var = sd**2 + random_var
        adjusted_sd = np.sqrt(adjusted_var)

        results = onesample.estimate_mean(Y, adjusted_sd)
        T = results['t']
        minT[i], maxT[i] = np.nanmin(T), np.nanmax(T)
    return minT, maxT
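
The masking step in the last two examples turns each subject's 3D volume into a 1D vector of in-mask voxels, so Y and sd end up with shape (number of subjects, number of in-mask voxels). A minimal stand-in with random data:

import numpy as np

vol = np.random.standard_normal((4, 4, 4))  # stands in for effect_img.get_data()
maska = vol > 0                             # stands in for the boolean analysis mask
masked = vol[maska]                         # 1D array of in-mask voxel values
print(vol.shape, '->', masked.shape)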