Example #1
    def _run_interface(self, runtime):

        beta_nii = nb.load(self.inputs.beta)
        if isdefined(self.inputs.mask):
            mask = nb.load(self.inputs.mask).get_data() > 0
        else:
            mask = np.ones(beta_nii.shape[:3]) == 1


        glm = GLM.glm()
        nii = nb.load(self.inputs.beta)
        glm.beta = beta_nii.get_data().copy()[mask,:].T
        glm.nvbeta = self.inputs.nvbeta
        glm.s2 = nb.load(self.inputs.s2).get_data().copy()[mask]
        glm.dof = self.inputs.dof
        glm._axis = self.inputs.axis
        glm._constants = self.inputs.constants

        reg_names = self.inputs.reg_names

        self._stat_maps = []
        self._p_maps = []
        self._z_maps = []
        for contrast_def in self.inputs.contrasts:
            name = contrast_def[0]
            _ = contrast_def[1]
            contrast = np.zeros(len(reg_names))

            for i, reg_name in enumerate(reg_names):
                if reg_name in contrast_def[2]:
                    idx = contrast_def[2].index(reg_name)
                    contrast[i] = contrast_def[3][idx]

            est_contrast = glm.contrast(contrast)

            stat_map = np.zeros(mask.shape)
            stat_map[mask] = est_contrast.stat().T
            stat_map_file = os.path.abspath(name + "_stat_map.nii")
            nb.save(nb.Nifti1Image(stat_map, nii.get_affine()), stat_map_file)
            self._stat_maps.append(stat_map_file)

            p_map = np.zeros(mask.shape)
            p_map[mask] = est_contrast.pvalue().T
            p_map_file = os.path.abspath(name + "_p_map.nii")
            nb.save(nb.Nifti1Image(p_map, nii.get_affine()), p_map_file)
            self._p_maps.append(p_map_file)

            z_map = np.zeros(mask.shape)
            z_map[mask] = est_contrast.zscore().T
            z_map_file = os.path.abspath(name + "_z_map.nii")
            nb.save(nb.Nifti1Image(z_map, nii.get_affine()), z_map_file)
            self._z_maps.append(z_map_file)

        return runtime
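For reference, a minimal self-contained sketch of the contrast-vector construction performed in the loop above; the regressor names and the (name, stat type, condition names, weights) tuple below are hypothetical and only illustrate the mapping.

import numpy as np

def contrast_vector(reg_names, contrast_def):
    # contrast_def mirrors the tuples iterated above:
    # (name, stat type, condition names, weights)
    _name, _stat_type, cond_names, weights = contrast_def
    contrast = np.zeros(len(reg_names))
    for i, reg_name in enumerate(reg_names):
        if reg_name in cond_names:
            contrast[i] = weights[cond_names.index(reg_name)]
    return contrast

# Hypothetical design-matrix regressors and a simple T contrast:
regs = ['audio', 'video', 'constant']
print(contrast_vector(regs, ('A-V', 'T', ['audio', 'video'], [1, -1])))
# [ 1. -1.  0.]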
Example #2
def glm_nipy(fmri_data, contrasts=None, hrf_model='Canonical',
             drift_model='Cosine', hfcut=128,
             residuals_model='spherical', fit_method='ols',
             fir_delays=[0],
             rescale_results=False, rescale_factor=None):

    """
    Perform a GLM analysis on fMRI data using the Nipy implementation.

    Args:
        fmri_data (pyhrf.core.FmriData): the input fMRI data defining the
            paradigm and the measured 3D+time signal.
        contrasts (dict): keys are contrast labels and values are arithmetic
            expressions involving regressor names. Valid names are:
            * names of experimental conditions as defined in fmri_data
            * constant
        hrf_model: "Canonical", "Canonical with Derivative", "FIR"
        residuals_model: "spherical", "ar1"
        fit_method: "ols", "kalman" (If residuals_model is "ar1" then method
            is set to "kalman" and this argument is ignored)
        fir_delays: list of integers indicating the delay of each FIR coefficient
                    (in terms of scans), e.g. if TR = 2 s and we want a FIR
                    duration of 20 s: fir_delays=range(10)
    Returns:
        (glm instance, design matrix, dict of contrast objects)

    Examples:
    >>> from pyhrf.core import FmriData
    >>> from pyhrf.glm import glm_nipy
    >>> g,dmtx,con = glm_nipy(FmriData.from_vol_ui())
    >>> g,dmtx,con = glm_nipy(FmriData.from_vol_ui(), \
                              contrasts={'A-V':'audio-video'})
    """

    paradigm = fmri_data.paradigm.to_nipy_paradigm()


    # BOLD data
    Y = fmri_data.bold.T
    n_scans = Y.shape[1]
    # pyhrf.verbose(1, 'Input BOLD: nvox=%d, nscans=%d' %Y.shape)

    # Design matrix
    frametimes = np.linspace(0, (n_scans-1)*fmri_data.tr, n_scans)
    design_matrix = dm.make_dmtx(frametimes, paradigm,
                                 hrf_model=hrf_model,
                                 drift_model=drift_model, hfcut=hfcut,
                                 fir_delays=fir_delays)

    ns, nr = design_matrix.matrix.shape
    pyhrf.verbose(2, 'Design matrix built with %d regressors:' %nr)
    for rn in design_matrix.names:
        pyhrf.verbose(2, '    - %s' %rn)

    # ax = design_matrix.show()
    # ax.set_position([.05, .25, .9, .65])
    # ax.set_title('Design matrix')
    # plt.savefig(op.join(output_dir, 'design_matrix.png'))

    # GLM fit
    my_glm = glm.glm()
    pyhrf.verbose(2, 'Fit GLM - method: %s, residual model: %s' \
                      %(fit_method,residuals_model))
    my_glm.fit(Y.T, design_matrix.matrix, method=fit_method,
               model=residuals_model)

    from pyhrf.tools import map_dict
    from pyhrf.paradigm import contrasts_to_spm_vec

    if rescale_results:

        # Rescale by the norm of the HRF:
        # from nipy.modalities.fmri.hemodynamic_models import _hrf_kernel, \
        #     sample_condition
        # oversampling = 16
        # hrfs = _hrf_kernel(hrf_model, fmri_data.tr, oversampling,
        #                    fir_delays=fir_delays)
        # hframetimes = np.linspace(0, 32., int(32./fmri_data.tr))
        # hr_regressor, hr_frametimes = sample_condition(
        #     (np.array([0]),np.array([0]),np.array([1])),
        #     hframetimes, oversampling)
        # from scipy.interpolate import interp1d
        # for i in xrange(len(hrfs)):
        #     f = interp1d(hr_frametimes, hrfs[i])
        #     hrfs[i] = f(hframetimes).T

        # n_conds = len(fmri_data.paradigm.stimOnsets)
        # for i in xrange(n_conds * len(hrfs)):
        #     h = hrfs[i%len(hrfs)]
        #     my_glm.beta[i] = my_glm.beta[i] * (h**2).sum()**.5

        #my_glm.variance = np.zeros_like(my_glm.beta)
        if 1:
            if rescale_results and rescale_factor is None:
                #Rescale by the norm of each regressor in the design matrix
                dm_reg_norms = (design_matrix.matrix**2).sum(0)**.5
                pyhrf.verbose(2,'GLM results (beta and con effects) are '\
                                  'rescaled by reg norm. Weights: %s ' \
                                  %str(dm_reg_norms))
    
                for ib in xrange(my_glm.beta.shape[0]):
                    my_glm.beta[ib] = my_glm.beta[ib] * dm_reg_norms[ib]
                    #my_glm.nvbeta[ib,:] = my_glm.nvbeta[ib,:] * dm_reg_norms[ib]**2
    
            else:
                pyhrf.verbose(2,'GLM results (beta and con effects) are '\
                                  'rescaled by input scale factor.')
    
                # Use input rescale factors:
                for ib in xrange(rescale_factor.shape[0]):
                    my_glm.beta[ib] = my_glm.beta[ib] * rescale_factor[ib]
                    #TOCHECK: nvbeta seems to be a covar matrix between reg
                    # -> we dont get position-specific variances ...
                    #my_glm.nvbeta[ib,:] = my_glm.nvbeta[ib,:] * rescale_factor[ib]**2
    
    if contrasts is not None:
        con_vectors = contrasts_to_spm_vec(design_matrix.names, contrasts)
        # if rescale_results:
        #     for con_vec in con_vectors.itervalues():
        #         con_vec *= dm_reg_norms
        contrast_result = map_dict(my_glm.contrast, con_vectors)
    else:
        contrast_result = None


    return my_glm, design_matrix, contrast_result

#actually: not possible to compute PPM from glm results
#Should relaunch estimation with proper model under SPM
#def PPMcalculus_glmWN(beta, var_beta, dm, threshold_value):
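A toy numpy illustration (made-up numbers, not from the original data) of the regressor-norm rescaling that glm_nipy applies above when rescale_results is True and no rescale_factor is given: each row of beta is multiplied by the L2 norm of the corresponding design-matrix column.

import numpy as np

# Toy design matrix (n_scans x n_regressors) and betas (n_regressors x n_voxels).
X = np.array([[1.0, 0.5],
              [2.0, 0.5],
              [2.0, 0.5]])
beta = np.array([[0.2, 0.4],
                 [1.0, 3.0]])

dm_reg_norms = (X ** 2).sum(0) ** .5          # L2 norm of each regressor column
beta_rescaled = beta * dm_reg_norms[:, None]  # scale each regressor's betas
print(dm_reg_norms)   # [3.        0.8660254]
print(beta_rescaled)  # [[0.6       1.2      ]
                      #  [0.8660254 2.5980762]]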
Example #3
    def _run_interface(self, runtime):

        session_info = self.inputs.session_info

        functional_runs = self.inputs.session_info[0]['scans']
        if isinstance(functional_runs, str):
            functional_runs = [functional_runs]
        nii = nb.load(functional_runs[0])
        data = nii.get_data()


        if isdefined(self.inputs.mask):
            mask = nb.load(self.inputs.mask).get_data() > 0
        else:
            mask = np.ones(nii.shape[:3]) == 1

        timeseries = data.copy()[mask,:]
        del data

        for functional_run in functional_runs[1:]:
            nii = nb.load(functional_run)
            data = nii.get_data()
            npdata = data.copy()
            del data
            timeseries = np.concatenate((timeseries,npdata[mask,:]), axis=1)
            del npdata

        nscans = timeseries.shape[1]

        if 'hpf' in session_info[0].keys():
            hpf = session_info[0]['hpf']
            drift_model=self.inputs.drift_model
        else:
            hpf=0
            drift_model = "Blank"

        reg_names = []
        for reg in session_info[0]['regress']:
            reg_names.append(reg['name'])

        reg_vals = np.zeros((nscans,len(reg_names)))
        for i in range(len(reg_names)):
            reg_vals[:,i] = np.array(session_info[0]['regress'][i]['val']).reshape(1,-1)


        frametimes= np.linspace(0, (nscans-1)*self.inputs.TR, nscans)

        conditions = []
        onsets = []
        duration = []

        for i,cond in enumerate(session_info[0]['cond']):
            onsets += cond['onset']
            conditions += [cond['name']]*len(cond['onset'])
            if len(cond['duration']) == 1:
                duration += cond['duration']*len(cond['onset'])
            else:
                duration += cond['duration']


        if conditions:
            paradigm =  BlockParadigm(con_id=conditions, onset=onsets, duration=duration)
        else:
            paradigm = None
        design_matrix, self._reg_names = dm.dmtx_light(frametimes, paradigm, drift_model=drift_model, hfcut=hpf,
               hrf_model=self.inputs.hrf_model,
               add_regs=reg_vals,
               add_reg_names=reg_names
               )
        if self.inputs.normalize_design_matrix:
            for i in range(len(self._reg_names)-1):
                design_matrix[:,i] = (design_matrix[:,i]-design_matrix[:,i].mean())/design_matrix[:,i].std()

        if self.inputs.plot_design_matrix:
            if pylab_available:
                pylab.pcolor(design_matrix)
                pylab.savefig("design_matrix.pdf")
                pylab.close()
                pylab.clf()
            else:
                raise Exception('Pylab not available for saving design matrix image')

        glm = GLM.glm()
        glm.fit(timeseries.T, design_matrix, method=self.inputs.method, model=self.inputs.model)


        self._beta_file = os.path.abspath("beta.nii")
        beta = np.zeros(mask.shape + (glm.beta.shape[0],))
        beta[mask,:] = glm.beta.T
        nb.save(nb.Nifti1Image(beta, nii.get_affine()), self._beta_file)

        self._s2_file = os.path.abspath("s2.nii")
        s2 = np.zeros(mask.shape)
        s2[mask] = glm.s2
        nb.save(nb.Nifti1Image(s2, nii.get_affine()), self._s2_file)

        if self.inputs.save_residuals:
            explained = np.dot(design_matrix,glm.beta)
            residuals = np.zeros(mask.shape + (nscans,))
            residuals[mask,:] = timeseries - explained.T
            self._residuals_file = os.path.abspath("residuals.nii")
            nb.save(nb.Nifti1Image(residuals, nii.get_affine()), self._residuals_file)

        self._nvbeta = glm.nvbeta
        self._dof = glm.dof
        self._constants = glm._constants
        self._axis = glm._axis
        if self.inputs.model == "ar1":
            self._a_file = os.path.abspath("a.nii")
            a = np.zeros(mask.shape)
            a[mask] = glm.a.squeeze()
            nb.save(nb.Nifti1Image(a, nii.get_affine()), self._a_file)
        self._model = glm.model
        self._method = glm.method

        return runtime
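A small numpy sketch (toy shapes, not part of the interface) of the residual computation and masked 4D reconstruction done above when save_residuals is set: the fitted signal (design matrix times beta) is subtracted from the in-mask timeseries and written back into a volume through the boolean mask.

import numpy as np

# Toy shapes: a 2x2x1 volume with 3 in-mask voxels, 4 scans, 2 regressors.
mask = np.array([[[True], [True]], [[True], [False]]])
nscans, nreg, nvox = 4, 2, int(mask.sum())

timeseries = np.arange(nvox * nscans, dtype=float).reshape(nvox, nscans)
design_matrix = np.ones((nscans, nreg))
beta = np.zeros((nreg, nvox))                 # e.g. from a GLM fit

explained = np.dot(design_matrix, beta)       # (nscans, nvox) fitted signal
residuals = np.zeros(mask.shape + (nscans,))  # 4D volume, zeros outside the mask
residuals[mask, :] = timeseries - explained.T
print(residuals.shape)                        # (2, 2, 1, 4)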
Example #4
def glm_nipy(fmri_data, contrasts=None, hrf_model='Canonical',
             drift_model='Cosine', hfcut=128,
             residuals_model='spherical', fit_method='ols',
             fir_delays=[0],
             rescale_results=False, rescale_factor=None):
    """
    Perform a GLM analysis on fMRI data using the Nipy implementation.

    Args:
        fmri_data (pyhrf.core.FmriData): the input fMRI data defining the
            paradigm and the measured 3D+time signal.
        contrasts (dict): keys are contrast labels and values are arithmetic
            expressions involving regressor names. Valid names are:
            * names of experimental conditions as defined in fmri_data
            * constant
        hrf_model: "Canonical", "Canonical with Derivative", "FIR"
        residuals_model: "spherical", "ar1"
        fit_method: "ols", "kalman" (If residuals_model is "ar1" then method
            is set to "kalman" and this argument is ignored)
        fir_delays: list of integers indicating the delay of each FIR coefficient
                    (in terms of scans), e.g. if TR = 2 s and we want a FIR
                    duration of 20 s: fir_delays=range(10)
    Returns:
        (glm instance, design matrix, dict of contrast objects)

    Examples:
    >>> from pyhrf.core import FmriData
    >>> from pyhrf.glm import glm_nipy
    >>> g,dmtx,con = glm_nipy(FmriData.from_vol_ui())
    >>> g,dmtx,con = glm_nipy(FmriData.from_vol_ui(), \
                              contrasts={'A-V':'audio-video'})
    """

    paradigm = fmri_data.paradigm.to_nipy_paradigm()

    # BOLD data
    Y = fmri_data.bold.T
    n_scans = Y.shape[1]

    # Design matrix
    frametimes = np.linspace(0, (n_scans - 1) * fmri_data.tr, n_scans)
    design_matrix = dm.make_dmtx(frametimes, paradigm,
                                 hrf_model=hrf_model,
                                 drift_model=drift_model, hfcut=hfcut,
                                 fir_delays=fir_delays)

    ns, nr = design_matrix.matrix.shape
    logger.info('Design matrix built with %d regressors:', nr)
    for rn in design_matrix.names:
        logger.info('    - %s', rn)

    # GLM fit
    my_glm = glm.glm()
    logger.info('Fit GLM - method: %s, residual model: %s', fit_method,
                residuals_model)
    my_glm.fit(Y.T, design_matrix.matrix, method=fit_method,
               model=residuals_model)

    from pyhrf.tools import map_dict
    from pyhrf.paradigm import contrasts_to_spm_vec

    if rescale_results:
        if 1:
            if rescale_results and rescale_factor is None:
                # Rescale by the norm of each regressor in the design matrix
                dm_reg_norms = (design_matrix.matrix ** 2).sum(0) ** .5
                logger.info('GLM results (beta and con effects) are '
                            'rescaled by reg norm. Weights: %s ',
                            str(dm_reg_norms))

                for ib in xrange(my_glm.beta.shape[0]):
                    my_glm.beta[ib] = my_glm.beta[ib] * dm_reg_norms[ib]

            else:
                logger.info('GLM results (beta and con effects) are '
                            'rescaled by input scale factor.')

                # Use input rescale factors:
                for ib in xrange(rescale_factor.shape[0]):
                    my_glm.beta[ib] = my_glm.beta[ib] * rescale_factor[ib]
                    # TOCHECK: nvbeta seems to be a covar matrix between reg
                    # -> we dont get position-specific variances ...
                    #my_glm.nvbeta[ib,:] = my_glm.nvbeta[ib,:] * rescale_factor[ib]**2

    if contrasts is not None:
        con_vectors = contrasts_to_spm_vec(design_matrix.names, contrasts)
        # if rescale_results:
        #     for con_vec in con_vectors.itervalues():
        #         con_vec *= dm_reg_norms
        contrast_result = map_dict(my_glm.contrast, con_vectors)
    else:
        contrast_result = None

    return my_glm, design_matrix, contrast_result

# actually: not possible to compute PPM from glm results
# Should relaunch estimation with proper model under SPM
# def PPMcalculus_glmWN(beta, var_beta, dm, threshold_value):
Example #5
    def _run_interface(self, runtime):

        session_info = self.inputs.session_info

        functional_runs = self.inputs.session_info[0]['scans']
        if isinstance(functional_runs, str):
            functional_runs = [functional_runs]
        nii = nb.load(functional_runs[0])
        data = nii.get_data()

        if isdefined(self.inputs.mask):
            mask = nb.load(self.inputs.mask).get_data() > 0
        else:
            mask = np.ones(nii.shape[:3]) == 1

        timeseries = data.copy()[mask, :]
        del data

        for functional_run in functional_runs[1:]:
            nii = nb.load(functional_run)
            data = nii.get_data()
            npdata = data.copy()
            del data
            timeseries = np.concatenate((timeseries, npdata[mask, :]), axis=1)
            del npdata

        nscans = timeseries.shape[1]

        if 'hpf' in session_info[0].keys():
            hpf = session_info[0]['hpf']
            drift_model = self.inputs.drift_model
        else:
            hpf = 0
            drift_model = "Blank"

        reg_names = []
        for reg in session_info[0]['regress']:
            reg_names.append(reg['name'])

        reg_vals = np.zeros((nscans, len(reg_names)))
        for i in range(len(reg_names)):
            reg_vals[:, i] = np.array(
                session_info[0]['regress'][i]['val']).reshape(1, -1)

        frametimes = np.linspace(0, (nscans - 1) * self.inputs.TR, nscans)

        conditions = []
        onsets = []
        duration = []

        for i, cond in enumerate(session_info[0]['cond']):
            onsets += cond['onset']
            conditions += [cond['name']] * len(cond['onset'])
            if len(cond['duration']) == 1:
                duration += cond['duration'] * len(cond['onset'])
            else:
                duration += cond['duration']

        if conditions:
            paradigm = BlockParadigm(con_id=conditions,
                                     onset=onsets,
                                     duration=duration)
        else:
            paradigm = None
        design_matrix, self._reg_names = dm.dmtx_light(
            frametimes,
            paradigm,
            drift_model=drift_model,
            hfcut=hpf,
            hrf_model=self.inputs.hrf_model,
            add_regs=reg_vals,
            add_reg_names=reg_names)
        if self.inputs.normalize_design_matrix:
            for i in range(len(self._reg_names) - 1):
                design_matrix[:, i] = (
                    design_matrix[:, i] -
                    design_matrix[:, i].mean()) / design_matrix[:, i].std()

        if self.inputs.plot_design_matrix:
            import pylab
            pylab.pcolor(design_matrix)
            pylab.savefig("design_matrix.pdf")
            pylab.close()
            pylab.clf()

        glm = GLM.glm()
        glm.fit(timeseries.T,
                design_matrix,
                method=self.inputs.method,
                model=self.inputs.model)

        self._beta_file = os.path.abspath("beta.nii")
        beta = np.zeros(mask.shape + (glm.beta.shape[0], ))
        beta[mask, :] = glm.beta.T
        nb.save(nb.Nifti1Image(beta, nii.get_affine()), self._beta_file)

        self._s2_file = os.path.abspath("s2.nii")
        s2 = np.zeros(mask.shape)
        s2[mask] = glm.s2
        nb.save(nb.Nifti1Image(s2, nii.get_affine()), self._s2_file)

        if self.inputs.save_residuals:
            explained = np.dot(design_matrix, glm.beta)
            residuals = np.zeros(mask.shape + (nscans, ))
            residuals[mask, :] = timeseries - explained.T
            self._residuals_file = os.path.abspath("residuals.nii")
            nb.save(nb.Nifti1Image(residuals, nii.get_affine()),
                    self._residuals_file)

        self._nvbeta = glm.nvbeta
        self._dof = glm.dof
        self._constants = glm._constants
        self._axis = glm._axis
        if self.inputs.model == "ar1":
            self._a_file = os.path.abspath("a.nii")
            a = np.zeros(mask.shape)
            a[mask] = glm.a.squeeze()
            nb.save(nb.Nifti1Image(a, nii.get_affine()), self._a_file)
        self._model = glm.model
        self._method = glm.method

        return runtime
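A short numpy sketch (toy values) of the normalize_design_matrix branch above: every design-matrix column except the last one (assumed here to be the constant regressor) is z-scored.

import numpy as np

X = np.array([[1.0, 10.0, 1.0],
              [2.0, 20.0, 1.0],
              [3.0, 30.0, 1.0]])   # toy design matrix; last column = constant

for i in range(X.shape[1] - 1):
    X[:, i] = (X[:, i] - X[:, i].mean()) / X[:, i].std()
print(X)
# [[-1.22474487 -1.22474487  1.        ]
#  [ 0.          0.          1.        ]
#  [ 1.22474487  1.22474487  1.        ]]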
Example #6
def glm_nipy(fmri_data,
             contrasts=None,
             hrf_model='Canonical',
             drift_model='Cosine',
             hfcut=128,
             residuals_model='spherical',
             fit_method='ols',
             fir_delays=[0],
             rescale_results=False,
             rescale_factor=None):
    """
    Perform a GLM analysis on fMRI data using the Nipy implementation.

    Args:
        fmri_data (pyhrf.core.FmriData): the input fMRI data defining the
            paradigm and the measured 3D+time signal.
        contrasts (dict): keys are contrast labels and values are arithmetic
            expressions involving regressor names. Valid names are:
            * names of experimental conditions as defined in fmri_data
            * constant
        hrf_model: "Canonical", "Canonical with Derivative", "FIR"
        residuals_model: "spherical", "ar1"
        fit_method: "ols", "kalman" (If residuals_model is "ar1" then method
            is set to "kalman" and this argument is ignored)
        fir_delays: list of integers indicating the delay of each FIR coefficient
                    (in terms of scans), e.g. if TR = 2 s and we want a FIR
                    duration of 20 s: fir_delays=range(10)
    Returns:
        (glm instance, design matrix, dict of contrast objects)

    Examples:
    >>> from pyhrf.core import FmriData
    >>> from pyhrf.glm import glm_nipy
    >>> g,dmtx,con = glm_nipy(FmriData.from_vol_ui())
    >>> g,dmtx,con = glm_nipy(FmriData.from_vol_ui(), \
                              contrasts={'A-V':'audio-video'})
    """

    paradigm = fmri_data.paradigm.to_nipy_paradigm()

    # BOLD data
    Y = fmri_data.bold.T
    n_scans = Y.shape[1]

    # Design matrix
    frametimes = np.linspace(0, (n_scans - 1) * fmri_data.tr, n_scans)
    design_matrix = dm.make_dmtx(frametimes,
                                 paradigm,
                                 hrf_model=hrf_model,
                                 drift_model=drift_model,
                                 hfcut=hfcut,
                                 fir_delays=fir_delays)

    ns, nr = design_matrix.matrix.shape
    logger.info('Design matrix built with %d regressors:', nr)
    for rn in design_matrix.names:
        logger.info('    - %s', rn)

    # GLM fit
    my_glm = glm.glm()
    logger.info('Fit GLM - method: %s, residual model: %s', fit_method,
                residuals_model)
    my_glm.fit(Y.T,
               design_matrix.matrix,
               method=fit_method,
               model=residuals_model)

    from pyhrf.tools import map_dict
    from pyhrf.paradigm import contrasts_to_spm_vec

    if rescale_results:
        if 1:
            if rescale_results and rescale_factor is None:
                # Rescale by the norm of each regressor in the design matrix
                dm_reg_norms = (design_matrix.matrix**2).sum(0)**.5
                logger.info(
                    'GLM results (beta and con effects) are '
                    'rescaled by reg norm. Weights: %s ', str(dm_reg_norms))

                for ib in xrange(my_glm.beta.shape[0]):
                    my_glm.beta[ib] = my_glm.beta[ib] * dm_reg_norms[ib]

            else:
                logger.info('GLM results (beta and con effects) are '
                            'rescaled by input scale factor.')

                # Use input rescale factors:
                for ib in xrange(rescale_factor.shape[0]):
                    my_glm.beta[ib] = my_glm.beta[ib] * rescale_factor[ib]
                    # TOCHECK: nvbeta seems to be a covar matrix between reg
                    # -> we dont get position-specific variances ...
                    #my_glm.nvbeta[ib,:] = my_glm.nvbeta[ib,:] * rescale_factor[ib]**2

    if contrasts is not None:
        con_vectors = contrasts_to_spm_vec(design_matrix.names, contrasts)
        # if rescale_results:
        #     for con_vec in con_vectors.itervalues():
        #         con_vec *= dm_reg_norms
        contrast_result = map_dict(my_glm.contrast, con_vectors)
    else:
        contrast_result = None

    return my_glm, design_matrix, contrast_result

    # actually: not possible to compute PPM from glm results
    # Should relaunch estimation with proper model under SPM
    # def PPMcalculus_glmWN(beta, var_beta, dm, threshold_value):